2026-03-10T05:12:35.368 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-10T05:12:35.373 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T05:12:35.393 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/909
branch: squid
description: orch/cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final}
email: null
first_in_suite: false
flavor: default
job_id: '909'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_REFRESH_FAILED
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-squid
    sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
- - host.b
  - osd.4
  - osd.5
  - osd.6
  - osd.7
seed: 8043
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
targets:
  vm00.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFW7ybuh02uHZpKrxFwd028wlB6ILbwWQYNTYs9+IxSqADgULfuqYiJOGh6Jew1CoNq/sUeJlpjhvnDw8h2WUVM=
  vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI/0kz+bo3YDykwybcbBMT0WphkpjzilY9+3VDsteWb3aeXi+1Y4TVxFLb0O550q38GXXOeGDIKCMNL69u2wzx8=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph fs volume create foofs
- cephadm.wait_for_service:
    service: mds.foofs
- cephadm.shell:
    host.a:
    - ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2
    - ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
    - while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done
    - echo test > /mnt/foo/testfile
    - sync
- parallel:
  - upgrade-tasks
  - workload-tasks
- vip.exec:
    host.a:
    - umount /mnt/foo
- cephadm.shell:
    host.a:
    - ceph nfs cluster ls | grep foo
    - ceph nfs export ls foo --detailed
    - rados -p .nfs --all ls -
- cephadm.shell:
    host.a:
    - 'set -ex
      [[ `ceph config get mgr mgr/cephadm/migration_current` -gt 2 ]]
      '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_01:00:38
tube: vps
upgrade-tasks:
  sequential:
  - cephadm.shell:
      env:
      - sha1
      host.a:
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
      - ceph config set global log_to_journald false --force
      - ceph mgr module enable nfs --force
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
  - cephadm.shell:
      env:
      - sha1
      host.a:
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
      - ceph orch ps
      - ceph versions
      - echo "wait for servicemap items w/ changing names to refresh"
      - sleep 60
      - ceph orch ps
      - ceph orch upgrade status
      - ceph health detail
      - ceph versions
      - ceph versions | jq -e '.overall | length == 1'
      - ceph versions | jq -e '.overall | keys' | grep $sha1
  - cephadm.wait_for_service:
      service: nfs.foo
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
workload-tasks:
  sequential:
  - exec:
      host.a:
      - cd /mnt/foo && dbench 5 -t 600 || true
      - umount /mnt/foo
      - while ! mount -t nfs $(hostname):/fake /mnt/foo ; do sleep 5 ; done
      - cd /mnt/foo && dbench 5 -t 5
2026-03-10T05:12:35.393 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it
2026-03-10T05:12:35.394 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks
2026-03-10T05:12:35.394 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-10T05:12:35.394 INFO:teuthology.task.internal:Checking packages...
2026-03-10T05:12:35.394 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-10T05:12:35.394 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-10T05:12:35.394 INFO:teuthology.packaging:ref: None
2026-03-10T05:12:35.394 INFO:teuthology.packaging:tag: None
2026-03-10T05:12:35.394 INFO:teuthology.packaging:branch: squid
2026-03-10T05:12:35.394 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T05:12:35.395 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-10T05:12:36.145 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-10T05:12:36.146 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-10T05:12:36.146 INFO:teuthology.task.internal:no buildpackages task found
2026-03-10T05:12:36.146 INFO:teuthology.run_tasks:Running task internal.save_config...
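The upgrade-tasks sequence in the config above drives the whole test: it starts `ceph orch upgrade start` against the quay.ceph.io CI build, then polls `ceph orch upgrade status` until the JSON report shows the upgrade finished or hit an error. For reference, a standalone sketch of that polling logic (assumptions: run on a host with an admin keyring and jq installed; the deadline is an added safeguard, not part of the job):

    #!/usr/bin/env bash
    # Poll a cephadm upgrade until it completes or reports an error,
    # mirroring the wait loop in upgrade-tasks above.
    set -u
    deadline=$((SECONDS + 3600))   # assumed cap; the job itself has none
    while ceph orch upgrade status | jq '.in_progress' | grep -q true &&
          ! ceph orch upgrade status | jq '.message' | grep -q Error; do
        ceph orch ps; ceph versions; ceph health detail
        (( SECONDS < deadline )) || { echo 'upgrade timed out' >&2; exit 1; }
        sleep 30
    done
    # After convergence every daemon should report a single version.
    ceph versions | jq -e '.overall | length == 1'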
2026-03-10T05:12:36.147 INFO:teuthology.task.internal:Saving configuration
2026-03-10T05:12:36.152 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-10T05:12:36.153 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-10T05:12:36.160 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm00.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/909', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 05:10:57.364440', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:00', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFW7ybuh02uHZpKrxFwd028wlB6ILbwWQYNTYs9+IxSqADgULfuqYiJOGh6Jew1CoNq/sUeJlpjhvnDw8h2WUVM='}
2026-03-10T05:12:36.165 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/909', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 05:10:57.365008', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI/0kz+bo3YDykwybcbBMT0WphkpjzilY9+3VDsteWb3aeXi+1Y4TVxFLb0O550q38GXXOeGDIKCMNL69u2wzx8='}
2026-03-10T05:12:36.165 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-10T05:12:36.166 INFO:teuthology.task.internal:roles: ubuntu@vm00.local - ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
2026-03-10T05:12:36.166 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
2026-03-10T05:12:36.166 INFO:teuthology.run_tasks:Running task console_log...
2026-03-10T05:12:36.172 DEBUG:teuthology.task.console_log:vm00 does not support IPMI; excluding
2026-03-10T05:12:36.178 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding
2026-03-10T05:12:36.178 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7facb2372170>, signals=[15])
2026-03-10T05:12:36.178 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-10T05:12:36.179 INFO:teuthology.task.internal:Opening connections...
2026-03-10T05:12:36.179 DEBUG:teuthology.task.internal:connecting to ubuntu@vm00.local
2026-03-10T05:12:36.179 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T05:12:36.241 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local
2026-03-10T05:12:36.242 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T05:12:36.300 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-10T05:12:36.302 DEBUG:teuthology.orchestra.run.vm00:> uname -m
2026-03-10T05:12:36.357 INFO:teuthology.orchestra.run.vm00.stdout:x86_64
2026-03-10T05:12:36.357 DEBUG:teuthology.orchestra.run.vm00:> cat /etc/os-release
2026-03-10T05:12:36.414 INFO:teuthology.orchestra.run.vm00.stdout:NAME="CentOS Stream"
2026-03-10T05:12:36.414 INFO:teuthology.orchestra.run.vm00.stdout:VERSION="9"
2026-03-10T05:12:36.414 INFO:teuthology.orchestra.run.vm00.stdout:ID="centos"
2026-03-10T05:12:36.414 INFO:teuthology.orchestra.run.vm00.stdout:ID_LIKE="rhel fedora"
2026-03-10T05:12:36.414 INFO:teuthology.orchestra.run.vm00.stdout:VERSION_ID="9"
2026-03-10T05:12:36.414 INFO:teuthology.orchestra.run.vm00.stdout:PLATFORM_ID="platform:el9"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:ANSI_COLOR="0;31"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:LOGO="fedora-logo-icon"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:HOME_URL="https://centos.org/"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T05:12:36.415 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T05:12:36.415 INFO:teuthology.lock.ops:Updating vm00.local on lock server
2026-03-10T05:12:36.420 DEBUG:teuthology.orchestra.run.vm03:> uname -m
2026-03-10T05:12:36.434 INFO:teuthology.orchestra.run.vm03.stdout:x86_64
2026-03-10T05:12:36.435 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T05:12:36.490 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T05:12:36.490 INFO:teuthology.lock.ops:Updating vm03.local on lock server
2026-03-10T05:12:36.494 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-10T05:12:36.496 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-10T05:12:36.497 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-10T05:12:36.497 DEBUG:teuthology.orchestra.run.vm00:> test '!' -e /home/ubuntu/cephtest
2026-03-10T05:12:36.499 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest
2026-03-10T05:12:36.544 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-10T05:12:36.546 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-10T05:12:36.546 DEBUG:teuthology.orchestra.run.vm00:> test -z $(ls -A /var/lib/ceph)
2026-03-10T05:12:36.555 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph)
2026-03-10T05:12:36.568 INFO:teuthology.orchestra.run.vm00.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T05:12:36.600 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T05:12:36.600 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-10T05:12:36.608 DEBUG:teuthology.orchestra.run.vm00:> test -e /ceph-qa-ready
2026-03-10T05:12:36.623 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T05:12:36.834 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready
2026-03-10T05:12:36.849 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T05:12:37.040 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-10T05:12:37.042 INFO:teuthology.task.internal:Creating test directory...
2026-03-10T05:12:37.042 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T05:12:37.044 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T05:12:37.062 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-10T05:12:37.064 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-10T05:12:37.065 INFO:teuthology.task.internal:Creating archive directory...
2026-03-10T05:12:37.065 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T05:12:37.100 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T05:12:37.123 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-10T05:12:37.124 INFO:teuthology.task.internal:Enabling coredump saving...
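One detail in the /var/lib/ceph check a few entries above is easy to misread: `test -z $(ls -A /var/lib/ceph)` succeeds both when the directory is empty and when it does not exist at all, because the unquoted command substitution collapses to zero arguments and ls's error goes only to stderr (it is printed above, yet the run proceeds). An equivalent, more explicit form of the same check (a sketch, not teuthology's code):

    # Fail only if /var/lib/ceph exists and has entries.
    if [ -d /var/lib/ceph ] && [ -n "$(ls -A /var/lib/ceph)" ]; then
        echo "/var/lib/ceph is not empty" >&2
        exit 1
    fi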
2026-03-10T05:12:37.124 DEBUG:teuthology.orchestra.run.vm00:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T05:12:37.168 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T05:12:37.168 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T05:12:37.185 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T05:12:37.185 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T05:12:37.210 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T05:12:37.232 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T05:12:37.241 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T05:12:37.252 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T05:12:37.261 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T05:12:37.263 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-10T05:12:37.264 INFO:teuthology.task.internal:Configuring sudo...
2026-03-10T05:12:37.264 DEBUG:teuthology.orchestra.run.vm00:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T05:12:37.285 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T05:12:37.330 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-10T05:12:37.332 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
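The coredump task above points kernel.core_pattern into the job's archive directory so any daemon crash during the run is captured for later triage. The same setup in standalone form (a sketch; the path is taken from the log, and per core(5) %t expands to the dump time in seconds since the epoch and %p to the PID of the crashing process):

    dir=/home/ubuntu/cephtest/archive/coredump
    install -d -m0755 -- "$dir"
    sudo sysctl -w kernel.core_pattern="$dir/%t.%p.core"
    # Persist across reboots, as the task does by appending to /etc/sysctl.conf.
    echo "kernel.core_pattern=$dir/%t.%p.core" | sudo tee -a /etc/sysctl.conf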
2026-03-10T05:12:37.332 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T05:12:37.347 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T05:12:37.386 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T05:12:37.421 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T05:12:37.481 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:12:37.481 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T05:12:37.539 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T05:12:37.566 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T05:12:37.619 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T05:12:37.620 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T05:12:37.678 DEBUG:teuthology.orchestra.run.vm00:> sudo service rsyslog restart
2026-03-10T05:12:37.680 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart
2026-03-10T05:12:37.706 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T05:12:37.743 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T05:12:38.086 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-10T05:12:38.088 INFO:teuthology.task.internal:Starting timer...
2026-03-10T05:12:38.088 INFO:teuthology.run_tasks:Running task pcp...
2026-03-10T05:12:38.091 INFO:teuthology.run_tasks:Running task selinux...
2026-03-10T05:12:38.093 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']}
2026-03-10T05:12:38.093 INFO:teuthology.task.selinux:Excluding vm00: VMs are not yet supported
2026-03-10T05:12:38.093 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported
2026-03-10T05:12:38.093 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-10T05:12:38.093 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-10T05:12:38.093 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-10T05:12:38.093 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-10T05:12:38.095 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-10T05:12:38.095 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-10T05:12:38.096 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-10T05:12:38.690 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-10T05:12:38.696 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-10T05:12:38.696 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventorynasp8hgh --limit vm00.local,vm03.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-10T05:15:06.301 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm00.local'), Remote(name='ubuntu@vm03.local')]
2026-03-10T05:15:06.301 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm00.local'
2026-03-10T05:15:06.301 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T05:15:06.369 DEBUG:teuthology.orchestra.run.vm00:> true
2026-03-10T05:15:06.453 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm00.local'
2026-03-10T05:15:06.453 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local'
2026-03-10T05:15:06.454 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T05:15:06.520 DEBUG:teuthology.orchestra.run.vm03:> true
2026-03-10T05:15:06.599 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local'
2026-03-10T05:15:06.599 INFO:teuthology.run_tasks:Running task clock...
2026-03-10T05:15:06.603 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-10T05:15:06.603 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T05:15:06.603 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T05:15:06.605 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T05:15:06.605 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T05:15:06.645 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T05:15:06.664 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T05:15:06.682 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T05:15:06.696 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T05:15:06.696 INFO:teuthology.orchestra.run.vm00.stderr:sudo: ntpd: command not found
2026-03-10T05:15:06.708 INFO:teuthology.orchestra.run.vm00.stdout:506 Cannot talk to daemon
2026-03-10T05:15:06.725 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found
2026-03-10T05:15:06.726 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T05:15:06.736 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon
2026-03-10T05:15:06.745 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T05:15:06.749 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T05:15:06.761 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T05:15:06.792 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found
2026-03-10T05:15:06.805 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm03.stdout:^? ntp.kernfusion.at 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm03.stdout:^? www.kernfusion.at 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm03.stdout:^? time.bauer-group.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm03.stdout:^? de.relay.mahi.be 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm00.stdout:===============================================================================
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm00.stdout:^? www.kernfusion.at 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm00.stdout:^? time.bauer-group.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm00.stdout:^? de.relay.mahi.be 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.969 INFO:teuthology.orchestra.run.vm00.stdout:^? ntp.kernfusion.at 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-10T05:15:06.970 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-10T05:15:07.018 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_REFRESH_FAILED'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'}
2026-03-10T05:15:07.019 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0
2026-03-10T05:15:07.019 INFO:tasks.cephadm:Cluster fsid is 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:07.019 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-10T05:15:07.019 INFO:tasks.cephadm:No mon roles; fabricating mons
2026-03-10T05:15:07.019 INFO:tasks.cephadm:Monitor IPs: {'mon.vm00': '192.168.123.100', 'mon.vm03': '192.168.123.103'}
2026-03-10T05:15:07.019 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-10T05:15:07.019 DEBUG:teuthology.orchestra.run.vm00:> sudo hostname $(hostname -s)
2026-03-10T05:15:07.053 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s)
2026-03-10T05:15:07.084 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)...
2026-03-10T05:15:07.084 DEBUG:teuthology.orchestra.run.vm00:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T05:15:07.375 INFO:teuthology.orchestra.run.vm00.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 05:15 /home/ubuntu/cephtest/cephadm
2026-03-10T05:15:07.376 DEBUG:teuthology.orchestra.run.vm03:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T05:15:07.473 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 05:15 /home/ubuntu/cephtest/cephadm
2026-03-10T05:15:07.473 DEBUG:teuthology.orchestra.run.vm00:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T05:15:07.498 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T05:15:07.526 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts...
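The cephadm download above pairs the fetch with a sanity check, since a failed raw.githubusercontent.com request can leave behind an empty file or a small error page that would only fail much later, at bootstrap. The same fetch-and-verify pattern in standalone form (a sketch; ref and destination are taken from the log):

    ref=v17.2.0
    dest=/home/ubuntu/cephtest/cephadm
    curl --silent "https://raw.githubusercontent.com/ceph/ceph/${ref}/src/cephadm/cephadm" > "$dest"
    # Reject empty or implausibly small downloads before marking executable.
    test -s "$dest" && test "$(stat -c%s "$dest")" -gt 1000 && chmod +x "$dest"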
2026-03-10T05:15:07.526 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-10T05:15:07.540 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-10T05:15:07.770 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T05:15:07.782 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout:{
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout: ]
2026-03-10T05:15:31.534 INFO:teuthology.orchestra.run.vm03.stdout:}
2026-03-10T05:15:31.596 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout: "repo_digests": [
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout: ]
2026-03-10T05:15:31.597 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T05:15:31.618 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /etc/ceph
2026-03-10T05:15:31.652 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph
2026-03-10T05:15:31.684 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 777 /etc/ceph
2026-03-10T05:15:31.719 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph
2026-03-10T05:15:31.754 INFO:tasks.cephadm:Writing seed config...
2026-03-10T05:15:31.754 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-10T05:15:31.754 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-10T05:15:31.754 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-10T05:15:31.754 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-10T05:15:31.755 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-10T05:15:31.755 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-10T05:15:31.755 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-10T05:15:31.755 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-10T05:15:31.755 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-10T05:15:31.755 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:15:31.755 DEBUG:teuthology.orchestra.run.vm00:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-10T05:15:31.779 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000    # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = 1a50eb6e-1c40-11f1-854f-9d3053100916

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660    # 11m
auth service ticket ttl = 240    # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
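The seed conf above only primes the cluster; once bootstrap has assimilated it (see the bootstrap output further down), individual options can be checked against the monitor's configuration database. A sketch, using an option from the [osd] section above:

    # Confirm a seeded option is visible in the cluster config database.
    ceph config get osd osd_mclock_iops_capacity_threshold_hdd   # expect 49000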
2026-03-10T05:15:31.779 DEBUG:teuthology.orchestra.run.vm00:mon.vm00> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mon.vm00.service
2026-03-10T05:15:31.820 INFO:tasks.cephadm:Bootstrapping...
2026-03-10T05:15:31.820 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.100 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-10T05:15:31.990 INFO:teuthology.orchestra.run.vm00.stderr:--------------------------------------------------------------------------------
2026-03-10T05:15:31.990 INFO:teuthology.orchestra.run.vm00.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', '1a50eb6e-1c40-11f1-854f-9d3053100916', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.100', '--skip-admin-label']
2026-03-10T05:15:32.012 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T05:15:32.019 INFO:teuthology.orchestra.run.vm00.stderr:Verifying podman|docker is present...
2026-03-10T05:15:32.037 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T05:15:32.043 INFO:teuthology.orchestra.run.vm00.stderr:Verifying lvm2 is present...
2026-03-10T05:15:32.043 INFO:teuthology.orchestra.run.vm00.stderr:Verifying time synchronization is in place...
2026-03-10T05:15:32.053 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T05:15:32.062 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive
2026-03-10T05:15:32.070 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled
2026-03-10T05:15:32.079 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active
2026-03-10T05:15:32.083 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running
2026-03-10T05:15:32.083 INFO:teuthology.orchestra.run.vm00.stderr:Repeating the final host check...
2026-03-10T05:15:32.102 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T05:15:32.106 INFO:teuthology.orchestra.run.vm00.stderr:podman (/bin/podman) version 5.8.0 is present
2026-03-10T05:15:32.106 INFO:teuthology.orchestra.run.vm00.stderr:systemctl is present
2026-03-10T05:15:32.106 INFO:teuthology.orchestra.run.vm00.stderr:lvcreate is present
2026-03-10T05:15:32.113 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T05:15:32.123 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive
2026-03-10T05:15:32.130 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled
2026-03-10T05:15:32.139 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active
2026-03-10T05:15:32.140 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running
2026-03-10T05:15:32.140 INFO:teuthology.orchestra.run.vm00.stderr:Host looks OK
2026-03-10T05:15:32.140 INFO:teuthology.orchestra.run.vm00.stderr:Cluster fsid: 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:32.141 INFO:teuthology.orchestra.run.vm00.stderr:Acquiring lock 139838062198640 on /run/cephadm/1a50eb6e-1c40-11f1-854f-9d3053100916.lock
2026-03-10T05:15:32.141 INFO:teuthology.orchestra.run.vm00.stderr:Lock 139838062198640 acquired on /run/cephadm/1a50eb6e-1c40-11f1-854f-9d3053100916.lock
2026-03-10T05:15:32.141 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 3300 ...
2026-03-10T05:15:32.142 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 6789 ...
2026-03-10T05:15:32.142 INFO:teuthology.orchestra.run.vm00.stderr:Base mon IP is 192.168.123.100, final addrv is [v2:192.168.123.100:3300,v1:192.168.123.100:6789]
2026-03-10T05:15:32.147 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.100 metric 100
2026-03-10T05:15:32.147 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.100 metric 100
2026-03-10T05:15:32.152 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium
2026-03-10T05:15:32.152 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-10T05:15:32.155 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-10T05:15:32.155 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 ::1/128 scope host
2026-03-10T05:15:32.155 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-10T05:15:32.156 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000
2026-03-10T05:15:32.156 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:0/64 scope link noprefixroute
2026-03-10T05:15:32.156 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-10T05:15:32.157 INFO:teuthology.orchestra.run.vm00.stderr:Mon IP `192.168.123.100` is in CIDR network `192.168.123.0/24`
2026-03-10T05:15:32.157 INFO:teuthology.orchestra.run.vm00.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-10T05:15:32.158 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T05:15:32.186 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0...
2026-03-10T05:15:33.526 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Getting image source signatures
2026-03-10T05:15:33.526 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a
2026-03-10T05:15:33.526 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b
2026-03-10T05:15:33.527 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c
2026-03-10T05:15:33.527 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513
2026-03-10T05:15:33.527 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2
2026-03-10T05:15:33.527 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-10T05:15:33.530 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Writing manifest to image destination
2026-03-10T05:15:33.535 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-10T05:15:33.699 INFO:teuthology.orchestra.run.vm00.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-10T05:15:33.764 INFO:teuthology.orchestra.run.vm00.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-10T05:15:33.764 INFO:teuthology.orchestra.run.vm00.stderr:Extracting ceph user uid/gid from container image...
2026-03-10T05:15:33.843 INFO:teuthology.orchestra.run.vm00.stderr:stat: 167 167
2026-03-10T05:15:33.871 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial keys...
2026-03-10T05:15:34.107 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQD2qK9ppucxAxAAFy5DNH5R9BHfwsa8vH2cNQ==
2026-03-10T05:15:34.250 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQD2qK9pODLMDhAAT9NNaDKgb0nThI6RiXZrMw==
2026-03-10T05:15:34.388 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQD2qK9pEGMHFxAAK3uBhBPBk7rle7PStYpovQ==
2026-03-10T05:15:34.468 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial monmap...
2026-03-10T05:15:34.592 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T05:15:34.592 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus
2026-03-10T05:15:34.592 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:34.592 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T05:15:34.622 INFO:teuthology.orchestra.run.vm00.stderr:monmaptool for vm00 [v2:192.168.123.100:3300,v1:192.168.123.100:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T05:15:34.622 INFO:teuthology.orchestra.run.vm00.stderr:setting min_mon_release = octopus
2026-03-10T05:15:34.622 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: set fsid to 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:34.622 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T05:15:34.622 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:15:34.622 INFO:teuthology.orchestra.run.vm00.stderr:Creating mon...
2026-03-10T05:15:34.797 INFO:teuthology.orchestra.run.vm00.stderr:create mon.vm00 on
2026-03-10T05:15:35.008 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-10T05:15:35.183 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916.target → /etc/systemd/system/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916.target.
2026-03-10T05:15:35.183 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916.target → /etc/systemd/system/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916.target.
2026-03-10T05:15:35.924 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mon.vm00.service: Unit ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mon.vm00.service not loaded.
2026-03-10T05:15:35.933 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916.target.wants/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mon.vm00.service → /etc/systemd/system/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@.service.
2026-03-10T05:15:36.853 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T05:15:36.853 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available
2026-03-10T05:15:36.854 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon to start...
2026-03-10T05:15:36.854 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon...
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: cluster:
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: id: 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: health: HEALTH_OK
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: services:
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon: 1 daemons, quorum vm00 (age 0.152925s)
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr: no daemons active
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: data:
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: objects: 0 objects, 0 B
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pgs:
2026-03-10T05:15:37.073 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:37.133 INFO:teuthology.orchestra.run.vm00.stderr:mon is available
2026-03-10T05:15:37.133 INFO:teuthology.orchestra.run.vm00.stderr:Assimilating anything we can from ceph.conf...
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global]
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.100:3300,v1:192.168.123.100:6789]
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr]
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd]
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000
2026-03-10T05:15:37.361 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true
2026-03-10T05:15:37.395 INFO:teuthology.orchestra.run.vm00.stderr:Generating new minimal ceph.conf...
2026-03-10T05:15:38.209 INFO:teuthology.orchestra.run.vm00.stderr:Restarting the monitor...
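The "Assimilating anything we can from ceph.conf..." step above is bootstrap folding the seed file into the monitor's central config database, which is why the subsequent minimal ceph.conf carries little beyond fsid and mon_host plus the handful of options that could not be assimilated. The same operation is available manually; a sketch:

    # Move file-based options into the cluster config database; anything that
    # cannot be stored centrally is written back to the output file.
    sudo ceph config assimilate-conf -i /etc/ceph/ceph.conf -o /tmp/ceph.conf.minimal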
2026-03-10T05:15:38.490 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00[49703]: 2026-03-10T05:15:38.481+0000 7f43e296b700 -1 mon.vm00@0(leader) e1 *** Got Signal Terminated ***
2026-03-10T05:15:38.743 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 podman[49904]: 2026-03-10 05:15:38.688550823 +0000 UTC m=+0.425116762 container died e61212377f04055b9efff8877b6ecc79c935d112b169d6896b95ed03be7458da (image=quay.io/ceph/ceph:v17.2.0, name=ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , GIT_BRANCH=HEAD, ceph=True, version=8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, RELEASE=HEAD, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, release=754)
2026-03-10T05:15:38.743 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 podman[49904]: 2026-03-10 05:15:38.717566348 +0000 UTC m=+0.454132277 container remove e61212377f04055b9efff8877b6ecc79c935d112b169d6896b95ed03be7458da (image=quay.io/ceph/ceph:v17.2.0, name=ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.expose-services=, vendor=Red Hat, Inc., version=8, com.redhat.component=centos-stream-container, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, RELEASE=HEAD, io.buildah.version=1.19.8)
2026-03-10T05:15:38.743 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 bash[49904]: ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00
2026-03-10T05:15:38.994 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 bash[49922]: Error: no container with name or ID "ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon.vm00" found: no such container
2026-03-10T05:15:38.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 systemd[1]: ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mon.vm00.service: Deactivated successfully.
2026-03-10T05:15:38.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 systemd[1]: Stopped Ceph mon.vm00 for 1a50eb6e-1c40-11f1-854f-9d3053100916.
2026-03-10T05:15:38.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 systemd[1]: Starting Ceph mon.vm00 for 1a50eb6e-1c40-11f1-854f-9d3053100916...
2026-03-10T05:15:38.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 podman[49965]: 2026-03-10 05:15:38.95305033 +0000 UTC m=+0.098152510 container create d3f5c725c1459eda67fc85704dc1341fc271e876e514832ba92fd61047ac260e (image=quay.io/ceph/ceph:v17.2.0, name=ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, name=centos-stream, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, vcs-type=git, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, architecture=x86_64, ceph=True, CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com)
2026-03-10T05:15:38.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 podman[49965]: 2026-03-10 05:15:38.888042491 +0000 UTC m=+0.033144671 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph:v17.2.0
2026-03-10T05:15:39.018 INFO:teuthology.orchestra.run.vm00.stderr:Setting mon public_network to 192.168.123.0/24
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 podman[49965]: 2026-03-10 05:15:38.992920529 +0000 UTC m=+0.138022709 container init d3f5c725c1459eda67fc85704dc1341fc271e876e514832ba92fd61047ac260e (image=quay.io/ceph/ceph:v17.2.0, name=ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00, distribution-scope=public, build-date=2022-05-03T08:36:31.336870, architecture=x86_64, CEPH_POINT_RELEASE=-17.2.0, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, io.buildah.version=1.19.8, name=centos-stream, vcs-type=git, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, io.openshift.tags=base centos centos-stream, ceph=True, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.expose-services=, GIT_BRANCH=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, release=754)
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:38 vm00 podman[49965]: 2026-03-10 05:15:38.998774295 +0000 UTC m=+0.143876475 container start d3f5c725c1459eda67fc85704dc1341fc271e876e514832ba92fd61047ac260e (image=quay.io/ceph/ceph:v17.2.0, name=ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00, io.openshift.expose-services=, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, architecture=x86_64, GIT_REPO=https://github.com/ceph/ceph-container.git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, CEPH_POINT_RELEASE=-17.2.0, vcs-type=git, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, release=754, distribution-scope=public, ceph=True)
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 bash[49965]: d3f5c725c1459eda67fc85704dc1341fc271e876e514832ba92fd61047ac260e
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 systemd[1]: Started Ceph mon.vm00 for 1a50eb6e-1c40-11f1-854f-9d3053100916.
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: set uid:gid to 167:167 (ceph:ceph)
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: pidfile_write: ignore empty --pid-file
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: load: jerasure load: lrc
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: RocksDB version: 6.15.5
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Git sha rocksdb_build_git_sha:@0@
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Compile date Apr 18 2022
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: DB SUMMARY
2026-03-10T05:15:39.245 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: DB Session ID: 2RZBLTJY1HO6LQUCV7UE
2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: CURRENT file: CURRENT
2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: IDENTITY file: IDENTITY
2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: MANIFEST file: MANIFEST-000009 size: 131 Bytes
2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: SST files in
/var/lib/ceph/mon/ceph-vm00/store.db dir, Total Num: 1, files: 000008.sst 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm00/store.db: 000010.log size: 81689 ; 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.error_if_exists: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.create_if_missing: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.paranoid_checks: 1 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.env: 0x55caf6cc6860 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.fs: Posix File System 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.info_log: 0x55caf944dee0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.statistics: (nil) 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.use_fsync: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_log_file_size: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.allow_fallocate: 1 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.use_direct_reads: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.db_log_dir: 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 
ceph-mon[49980]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-vm00/store.db 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.write_buffer_manager: 0x55caf953e2a0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.new_table_reader_for_compaction_inputs: 0 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T05:15:39.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.unordered_write: 0 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.write_thread_slow_yield_usec: 3 
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.row_cache: None
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.wal_filter: None
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.allow_ingest_behind: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.preserve_deletes: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.two_write_queues: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.manual_wal_flush: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.atomic_flush: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.persist_stats_to_disk: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.write_dbid_to_manifest: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.log_readahead_size: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.best_efforts_recovery: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.allow_data_in_errors: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.db_host_id: __hostname__
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_background_jobs: 2
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_background_compactions: -1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_subcompactions: 1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.delayed_write_rate : 16777216
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_total_wal_size: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.stats_dump_period_sec: 600
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.stats_persist_period_sec: 600
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_open_files: -1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bytes_per_sync: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.wal_bytes_per_sync: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.strict_bytes_per_sync: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_readahead_size: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_background_flushes: -1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Compression algorithms supported:
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kZSTD supported: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kXpressCompression supported: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kLZ4HCCompression supported: 1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kLZ4Compression supported: 1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kBZip2Compression supported: 0
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kZlibCompression supported: 1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: kSnappyCompression supported: 1
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Fast CRC32 supported: Supported on x86
2026-03-10T05:15:39.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm00/store.db/MANIFEST-000009
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]:
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.merge_operator:
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_filter: None
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_filter_factory: None
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.sst_partitioner_factory: None
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.memtable_factory: SkipListFactory
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.table_factory: BlockBasedTable
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55caf9419d00)
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: cache_index_and_filter_blocks: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: pin_top_level_index_and_filter: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_type: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: data_block_index_type: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_shortening: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: data_block_hash_table_util_ratio: 0.750000
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: hash_index_allow_collision: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: checksum: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: no_block_cache: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache: 0x55caf9484170
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_name: BinnedLRUCache
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_options:
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: capacity : 536870912
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: num_shard_bits : 4
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: strict_capacity_limit : 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: high_pri_pool_ratio: 0.000
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_compressed: (nil)
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: persistent_cache: (nil)
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_size: 4096
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_size_deviation: 10
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_restart_interval: 16
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_block_restart_interval: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: metadata_block_size: 4096
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: partition_filters: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: use_delta_encoding: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: filter_policy: rocksdb.BuiltinBloomFilter
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: whole_key_filtering: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: verify_compression: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: read_amp_bytes_per_bit: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: format_version: 4
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: enable_index_compression: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_align: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.write_buffer_size: 33554432
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_write_buffer_number: 2
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression: NoCompression
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression: Disabled
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.prefix_extractor: nullptr
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.num_levels: 7
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-10T05:15:39.248 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.window_bits: -14
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.level: 32767
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.strategy: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compression_opts.enabled: false
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.target_file_size_base: 67108864
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.target_file_size_multiplier: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.arena_block_size: 4194304
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.disable_auto_compactions: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.table_properties_collectors:
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.inplace_update_support: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.memtable_huge_page_size: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.bloom_locality: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.max_successive_merges: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.optimize_filters_for_hits: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.paranoid_file_checks: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.force_consistency_checks: 1
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.report_bg_io_stats: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.ttl: 2592000
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.periodic_compaction_seconds: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.enable_blob_files: false
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.min_blob_size: 0
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.blob_file_size: 268435456
2026-03-10T05:15:39.249 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.blob_compression_type: NoCompression
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.enable_blob_garbage_collection: false
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm00/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 11, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 5
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/version_set.cc:4083] Creating manifest 13
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773119739032988, "job": 1, "event": "recovery_started", "wal_files": [10]}
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #10 mode 2
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5.
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773119739034432, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 14, "file_size": 78660, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 76929, "index_size": 224, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 9826, "raw_average_key_size": 47, "raw_value_size": 71290, "raw_average_value_size": 347, "num_data_blocks": 10, "num_entries": 205, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773119739, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "c828c594-8d01-4b65-b6e3-b1ae5444e2f2", "db_session_id": "2RZBLTJY1HO6LQUCV7UE"}}
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/version_set.cc:4083] Creating manifest 15
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773119739035899, "job": 1, "event": "recovery_finished"}
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-vm00/store.db/000010.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x55caf946a700
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: DB pointer 0x55caf94de000
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS -------
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: rocksdb: [db/db_impl/db_impl.cc:903]
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** DB Stats **
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] **
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: L0 2/0 78.59 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Sum 2/0 78.59 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] **
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative compaction: 0.00 GB write, 3.87 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval compaction: 0.00 GB write, 3.87 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] **
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
2026-03-10T05:15:39.250 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: L0 2/0 78.59 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Sum 2/0 78.59 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] **
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 60.7 0.00 0.00 1 0.001 0 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative compaction: 0.00 GB write, 3.86 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-10T05:15:39.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:39 vm00 ceph-mon[49980]: starting mon.vm00 rank 0 at public addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] at bind addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon_data /var/lib/ceph/mon/ceph-vm00 fsid 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:39.392 INFO:teuthology.orchestra.run.vm00.stderr:Wrote config to /etc/ceph/ceph.conf
2026-03-10T05:15:39.393 INFO:teuthology.orchestra.run.vm00.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-10T05:15:39.393 INFO:teuthology.orchestra.run.vm00.stderr:Creating mgr...
2026-03-10T05:15:39.394 INFO:teuthology.orchestra.run.vm00.stderr:Verifying port 9283 ...
2026-03-10T05:15:39.579 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mgr.vm00.vnepyw.service: Unit ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mgr.vm00.vnepyw.service not loaded.
2026-03-10T05:15:39.586 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916.target.wants/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mgr.vm00.vnepyw.service → /etc/systemd/system/ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@.service.
2026-03-10T05:15:39.959 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T05:15:39.959 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available
2026-03-10T05:15:39.959 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T05:15:39.959 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[9283]>. firewalld.service is not available
2026-03-10T05:15:39.959 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr to start...
2026-03-10T05:15:39.959 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr...
2026-03-10T05:15:40.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:40 vm00 ceph-mon[49980]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0)
2026-03-10T05:15:40.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:40 vm00 ceph-mon[49980]: monmap e1: 1 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]}
2026-03-10T05:15:40.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:40 vm00 ceph-mon[49980]: fsmap
2026-03-10T05:15:40.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:40 vm00 ceph-mon[49980]: osdmap e1: 0 total, 0 up, 0 in
2026-03-10T05:15:40.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:40 vm00 ceph-mon[49980]: mgrmap e1: no daemons active
2026-03-10T05:15:40.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:40 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/445991605' entity='client.admin'
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "1a50eb6e-1c40-11f1-854f-9d3053100916",
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": {
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK",
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {},
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": []
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.264 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00"
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 1,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": {
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy",
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": {
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0,
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.265 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": {
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [],
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": {
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [],
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": {
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat",
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs",
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful"
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {}
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": {
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T05:15:36.915661+0000",
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {}
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {}
2026-03-10T05:15:40.266 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T05:15:40.353 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (1/15)...
2026-03-10T05:15:41.267 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:41 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1579308259' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:15:42.701 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "1a50eb6e-1c40-11f1-854f-9d3053100916",
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": {
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK",
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {},
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": []
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00"
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 3,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": {
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy",
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": {
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0,
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": {
2026-03-10T05:15:42.702 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [],
2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0,
2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0,
2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0,
2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0,
2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T05:15:36.915661+0000", 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T05:15:42.703 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T05:15:42.744 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (2/15)... 2026-03-10T05:15:42.775 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:42 vm00 ceph-mon[49980]: from='client.? 
192.168.123.100:0/3191018435' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T05:15:45.078 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "1a50eb6e-1c40-11f1-854f-9d3053100916", 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T05:15:45.079 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00" 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 6, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 
2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T05:15:36.915661+0000", 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T05:15:45.080 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T05:15:45.118 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (3/15)... 
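For reference, the loop above is the cephadm bootstrap readiness poll: it re-runs `ceph status` (up to 15 attempts here) until the mgr map reports an available active daemon. The same check can be reproduced by hand roughly as follows (a sketch, not the literal cephadm code; assumes `jq` is installed, and the 2-second delay is a guess since the log does not show the actual interval):

    # Poll until the cluster reports an available active mgr,
    # mirroring the "mgr not available, waiting (n/15)" messages above.
    for i in $(seq 1 15); do
        if ceph status --format json | jq -e '.mgrmap.available' >/dev/null; then
            echo "mgr is available"; break
        fi
        echo "mgr not available, waiting ($i/15)..."
        sleep 2
    done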
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: Activating manager daemon vm00.vnepyw
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: mgrmap e2: vm00.vnepyw(active, starting, since 0.0820428s)
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: Manager daemon vm00.vnepyw is now available
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:45.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:45 vm00 ceph-mon[49980]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:46.237 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:46 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/714162547' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:15:46.237 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:46 vm00 ceph-mon[49980]: mgrmap e3: vm00.vnepyw(active, since 1.08539s)
2026-03-10T05:15:47.481 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
[the fourth status poll, at 05:15:47.481, repeated the earlier dump with "quorum_age": 8; the decisive change is in its mgr map:]
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "mgrmap": {
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:         "available": true,
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:         "num_standbys": 0,
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:         "modules": [
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:             "iostat",
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:             "nfs",
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:             "restful"
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:         ],
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:         "services": {}
2026-03-10T05:15:47.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     },
[servicemap and progress_events were unchanged]
2026-03-10T05:15:47.483 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T05:15:47.517 INFO:teuthology.orchestra.run.vm00.stderr:mgr is available
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global]
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr]
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-10T05:15:47.778 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T05:15:47.779 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd]
2026-03-10T05:15:47.779 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10
2026-03-10T05:15:47.779 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000
2026-03-10T05:15:47.779 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true
2026-03-10T05:15:47.810 INFO:teuthology.orchestra.run.vm00.stderr:Enabling cephadm module...
2026-03-10T05:15:48.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:48 vm00 ceph-mon[49980]: mgrmap e4: vm00.vnepyw(active, since 2s)
2026-03-10T05:15:48.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:48 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/4138959648' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:15:48.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:48 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/4030771869' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-10T05:15:49.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:49 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1972491054' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
2026-03-10T05:15:49.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T05:15:49.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "epoch": 5,
2026-03-10T05:15:49.482 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "available": true,
2026-03-10T05:15:49.483 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "active_name": "vm00.vnepyw",
2026-03-10T05:15:49.483 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "num_standby": 0
2026-03-10T05:15:49.483 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T05:15:49.567 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart...
2026-03-10T05:15:49.567 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 5...
2026-03-10T05:15:50.279 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:50 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1972491054' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-03-10T05:15:50.279 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:50 vm00 ceph-mon[49980]: mgrmap e5: vm00.vnepyw(active, since 4s)
2026-03-10T05:15:50.279 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:50 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2390471054' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
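This is the next bootstrap phase: the conf fragment printed above is pushed into the monitors' config database (`config assimilate-conf`), the cephadm mgr module is enabled, and the bootstrap then waits for the mgr to respawn into a newer mgrmap epoch. By hand this is roughly the following (a sketch; the epoch arithmetic simplifies what cephadm actually does, and `jq` is assumed):

    ceph config assimilate-conf -i /etc/ceph/ceph.conf   # absorb the [global]/[mgr]/[osd] options shown above
    epoch=$(ceph mgr stat | jq '.epoch')                 # "mgr stat" returns {"epoch": ..., "available": ...}
    ceph mgr module enable cephadm                       # triggers a mgr respawn
    while [ "$(ceph mgr stat | jq '.epoch')" -le "$epoch" ]; do
        sleep 2                                          # wait for the post-restart mgrmap epoch
    done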
2026-03-10T05:15:54.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:53 vm00 ceph-mon[49980]: Active manager daemon vm00.vnepyw restarted
2026-03-10T05:15:54.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:53 vm00 ceph-mon[49980]: Activating manager daemon vm00.vnepyw
2026-03-10T05:15:54.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:53 vm00 ceph-mon[49980]: osdmap e2: 0 total, 0 up, 0 in
2026-03-10T05:15:55.018 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T05:15:55.018 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "mgrmap_epoch": 7,
2026-03-10T05:15:55.018 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "initialized": true
2026-03-10T05:15:55.018 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T05:15:55.048 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 5 is available
2026-03-10T05:15:55.048 INFO:teuthology.orchestra.run.vm00.stderr:Setting orchestrator backend to cephadm...
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: mgrmap e6: vm00.vnepyw(active, starting, since 0.0999647s)
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: Manager daemon vm00.vnepyw is now available
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:15:55.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:15:55.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:55 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:15:55.705 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: value unchanged
2026-03-10T05:15:55.768 INFO:teuthology.orchestra.run.vm00.stderr:Generating ssh key...
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: mgrmap e7: vm00.vnepyw(active, since 1.10904s)
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:56.127 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:56 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:15:56.438 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDISxM2CzCCFE6Tt4JOwCT0uhP6j+lomEzG1U869a/ZsH4tCone9Fap+x0RyS1VB3r2dg9czxDsAj3AgcofNbmJWetAsEJlMyqSQ1I0TM/21jmfCV2ctJ6MPAJGx8ptJ8s//eclc8dIw8UskdyMeDfF61a2SlklIDUfvhZokJVQqt0nj82xDwa0BFcZvMWHFbaE5f+hx93j/9KT+3vSV/oeh9b4Sld7wri1YEER2duwRJBvvCjXFO1MiQNWAhOpr8aWY7QgBQtMMOU++r5CJcC9uAFCdwGechxLCo+zH/ljeevNH26lI8AIOVh4rER0TlGnP9sLJ8aujjkC654B2C/9AvPd0xeKC/TepZ6ELYlCHBAWMeqDOgdUumoSH5eMbaKGud0SV17lIyVkpqpA3hHcdy3keLEIDlSzWaovqvRDMRdXcFjCfmHx8heDEf6PvlagxJT5Ifi3h7gvjAraXzfiLOj7+daO3OT1S2jqkJ3m12KX2oYpLLwpMPRgwmEgfM0= ceph-1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:15:56.482 INFO:teuthology.orchestra.run.vm00.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-03-10T05:15:56.482 INFO:teuthology.orchestra.run.vm00.stderr:Adding key to root@localhost authorized_keys...
2026-03-10T05:15:56.482 INFO:teuthology.orchestra.run.vm00.stderr:Adding host vm00...
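Here the cephadm module creates the SSH identity it will use to reach cluster hosts as root: the keypair is generated inside the mgr, the public half is exported (the ssh-rsa line above), and it is appended to root's authorized_keys on each target host. Roughly (a sketch of the commands visible in this phase; the /tmp path is illustrative):

    ceph cephadm set-user root                 # cephadm will ssh in as root
    ceph cephadm generate-key                  # mgr-side keypair ("Generating ssh key..." above)
    ceph cephadm get-pub-key > /tmp/ceph.pub   # the ssh-rsa line printed above
    # on each host to be managed:
    install -d -m 0700 /root/.ssh
    cat /tmp/ceph.pub >> /root/.ssh/authorized_keys
    chmod 0600 /root/.ssh/authorized_keys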
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: [10/Mar/2026:05:15:55] ENGINE Bus STARTING
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: [10/Mar/2026:05:15:55] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: [10/Mar/2026:05:15:55] ENGINE Bus STARTED
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: Generating ssh key...
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:57.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:57 vm00 ceph-mon[49980]: mgrmap e8: vm00.vnepyw(active, since 2s)
2026-03-10T05:15:57.507 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Added host 'vm00' with addr '192.168.123.100'
2026-03-10T05:15:57.576 INFO:teuthology.orchestra.run.vm00.stderr:Deploying mon service with default placement...
2026-03-10T05:15:57.930 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mon update...
2026-03-10T05:15:57.972 INFO:teuthology.orchestra.run.vm00.stderr:Deploying mgr service with default placement...
2026-03-10T05:15:58.148 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:58 vm00 ceph-mon[49980]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:15:58.148 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:58 vm00 ceph-mon[49980]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:15:58.148 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:58 vm00 ceph-mon[49980]: Deploying cephadm binary to vm00
2026-03-10T05:15:58.148 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:58 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:58.148 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:58 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:15:58.148 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:58 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:58.260 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mgr update...
2026-03-10T05:15:58.296 INFO:teuthology.orchestra.run.vm00.stderr:Deploying crash service with default placement...
2026-03-10T05:15:58.581 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled crash update...
2026-03-10T05:15:58.618 INFO:teuthology.orchestra.run.vm00.stderr:Deploying prometheus service with default placement...
2026-03-10T05:15:58.893 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled prometheus update...
2026-03-10T05:15:58.958 INFO:teuthology.orchestra.run.vm00.stderr:Deploying grafana service with default placement...
2026-03-10T05:15:59.174 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:59 vm00 ceph-mon[49980]: Added host vm00
2026-03-10T05:15:59.174 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:59 vm00 ceph-mon[49980]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:15:59.174 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:59 vm00 ceph-mon[49980]: Saving service mon spec with placement count:5
2026-03-10T05:15:59.174 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:59 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:59.174 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:59 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:59.174 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:15:59 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:15:59.374 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled grafana update...
2026-03-10T05:15:59.406 INFO:teuthology.orchestra.run.vm00.stderr:Deploying node-exporter service with default placement...
2026-03-10T05:15:59.854 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled node-exporter update...
2026-03-10T05:15:59.909 INFO:teuthology.orchestra.run.vm00.stderr:Deploying alertmanager service with default placement...
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: Saving service mgr spec with placement count:2
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: Saving service crash spec with placement *
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: Saving service prometheus spec with placement count:1
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:00.186 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:00 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:00.315 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled alertmanager update...
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: Saving service grafana spec with placement count:1
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: Saving service node-exporter spec with placement *
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:01.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:01 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/619573764' entity='client.admin'
2026-03-10T05:16:01.286 INFO:teuthology.orchestra.run.vm00.stderr:Enabling the dashboard module...
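The burst of `orch apply` dispatches above is bootstrap saving one service spec per default service; the placements recorded in the log (count:5, count:2, *, count:1, ...) correspond to commands of this shape (a sketch built from the spec values shown, not commands copied from the log):

    ceph orch apply mon 5              # "Saving service mon spec with placement count:5"
    ceph orch apply mgr 2              # count:2
    ceph orch apply crash '*'          # one crash agent on every host
    ceph orch apply prometheus 1
    ceph orch apply grafana 1
    ceph orch apply node-exporter '*'
    ceph orch apply alertmanager 1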
2026-03-10T05:16:02.194 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:02 vm00 ceph-mon[49980]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:02.194 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:02 vm00 ceph-mon[49980]: Saving service alertmanager spec with placement count:1
2026-03-10T05:16:02.194 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:02 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/3347018677' entity='client.admin'
2026-03-10T05:16:02.194 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:02 vm00 ceph-mon[49980]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:02.194 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:02 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2567113296' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-10T05:16:02.847 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T05:16:02.847 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "epoch": 9,
2026-03-10T05:16:02.847 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "available": true,
2026-03-10T05:16:02.847 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "active_name": "vm00.vnepyw",
2026-03-10T05:16:02.847 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "num_standby": 0
2026-03-10T05:16:02.847 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T05:16:02.940 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart...
2026-03-10T05:16:02.940 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 9...
2026-03-10T05:16:03.453 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:03 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2567113296' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-10T05:16:03.453 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:03 vm00 ceph-mon[49980]: mgrmap e9: vm00.vnepyw(active, since 8s)
2026-03-10T05:16:03.453 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:03 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/3084428704' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-10T05:16:07.241 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:07 vm00 ceph-mon[49980]: Active manager daemon vm00.vnepyw restarted
2026-03-10T05:16:07.241 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:07 vm00 ceph-mon[49980]: Activating manager daemon vm00.vnepyw
2026-03-10T05:16:07.241 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:07 vm00 ceph-mon[49980]: osdmap e3: 0 total, 0 up, 0 in
2026-03-10T05:16:08.167 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T05:16:08.167 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "mgrmap_epoch": 11,
2026-03-10T05:16:08.167 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:     "initialized": true
2026-03-10T05:16:08.167 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T05:16:08.223 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 9 is available
2026-03-10T05:16:08.223 INFO:teuthology.orchestra.run.vm00.stderr:Generating a dashboard self-signed certificate...
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: mgrmap e10: vm00.vnepyw(active, starting, since 0.055285s)
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: Manager daemon vm00.vnepyw is now available
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:08.253 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:08 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:08.681 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Self-signed certificate created
2026-03-10T05:16:08.728 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial admin user...
2026-03-10T05:16:09.243 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$/Ii2k/8q9e5kM0/HnQLbRO7gkcyIu/KbT0Dk4Oa/B2Eyo5a36tm8i", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773119769, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-10T05:16:09.269 INFO:teuthology.orchestra.run.vm00.stderr:Fetching dashboard port number...
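The admin-user record above comes from the `dashboard ac-user-create` command dispatched a little further down (with force_password and pwd_update_required set), which takes the password from a file rather than the command line. The equivalent manual step is roughly this (a sketch; the file path is illustrative, and the password is the one the bootstrap prints below):

    echo -n '43e5s7n2sm' > /tmp/dashboard-pass.txt
    ceph dashboard ac-user-create admin -i /tmp/dashboard-pass.txt administrator
    rm /tmp/dashboard-pass.txt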
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: [10/Mar/2026:05:16:07] ENGINE Bus STARTING
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: [10/Mar/2026:05:16:07] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: [10/Mar/2026:05:16:07] ENGINE Bus STARTED
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: mgrmap e11: vm00.vnepyw(active, since 1.06182s)
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:09.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:09 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:09.536 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 8443
2026-03-10T05:16:09.571 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T05:16:09.571 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[8443]>. firewalld.service is not available
2026-03-10T05:16:09.575 INFO:teuthology.orchestra.run.vm00.stderr:Ceph Dashboard is now available at:
2026-03-10T05:16:09.575 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:09.575 INFO:teuthology.orchestra.run.vm00.stderr:    URL: https://vm00.local:8443/
2026-03-10T05:16:09.575 INFO:teuthology.orchestra.run.vm00.stderr:    User: admin
2026-03-10T05:16:09.575 INFO:teuthology.orchestra.run.vm00.stderr:    Password: 43e5s7n2sm
2026-03-10T05:16:09.576 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:09.576 INFO:teuthology.orchestra.run.vm00.stderr:Enabling autotune for osd_memory_target
2026-03-10T05:16:10.183 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status
2026-03-10T05:16:10.214 INFO:teuthology.orchestra.run.vm00.stderr:You can access the Ceph CLI with:
2026-03-10T05:16:10.214 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:    sudo /home/ubuntu/cephtest/cephadm shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:Please consider enabling telemetry to help improve Ceph:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:    ceph telemetry on
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:For more information see:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:    https://docs.ceph.com/docs/master/mgr/telemetry/
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T05:16:10.215 INFO:teuthology.orchestra.run.vm00.stderr:Bootstrap complete.
2026-03-10T05:16:10.271 INFO:tasks.cephadm:Fetching config...
2026-03-10T05:16:10.271 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:16:10.271 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-10T05:16:10.287 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:10 vm00 ceph-mon[49980]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:10.287 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:10 vm00 ceph-mon[49980]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:10.287 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:10 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:10.287 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:10 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/471971931' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-10T05:16:10.287 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:10 vm00 ceph-mon[49980]: mgrmap e12: vm00.vnepyw(active, since 2s)
2026-03-10T05:16:10.287 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:10 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/738524834' entity='client.admin'
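"Bootstrap complete." ends the cephadm bootstrap proper; teuthology then pulls /etc/ceph/ceph.conf and the keyrings off the host with plain `dd` reads so it can distribute them itself. The hint printed above is also the normal way in: any `ceph` command can be run through the packaged cephadm binary, following the log's own invocation style, e.g.:

    sudo /home/ubuntu/cephtest/cephadm shell \
        --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        -- ceph -s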
2026-03-10T05:16:10.351 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-10T05:16:10.351 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:16:10.351 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-10T05:16:10.431 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-10T05:16:10.431 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:16:10.431 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/keyring of=/dev/stdout
2026-03-10T05:16:10.535 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-10T05:16:10.535 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:16:10.535 DEBUG:teuthology.orchestra.run.vm00:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-10T05:16:10.602 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-03-10T05:16:10.602 DEBUG:teuthology.orchestra.run.vm00:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDISxM2CzCCFE6Tt4JOwCT0uhP6j+lomEzG1U869a/ZsH4tCone9Fap+x0RyS1VB3r2dg9czxDsAj3AgcofNbmJWetAsEJlMyqSQ1I0TM/21jmfCV2ctJ6MPAJGx8ptJ8s//eclc8dIw8UskdyMeDfF61a2SlklIDUfvhZokJVQqt0nj82xDwa0BFcZvMWHFbaE5f+hx93j/9KT+3vSV/oeh9b4Sld7wri1YEER2duwRJBvvCjXFO1MiQNWAhOpr8aWY7QgBQtMMOU++r5CJcC9uAFCdwGechxLCo+zH/ljeevNH26lI8AIOVh4rER0TlGnP9sLJ8aujjkC654B2C/9AvPd0xeKC/TepZ6ELYlCHBAWMeqDOgdUumoSH5eMbaKGud0SV17lIyVkpqpA3hHcdy3keLEIDlSzWaovqvRDMRdXcFjCfmHx8heDEf6PvlagxJT5Ifi3h7gvjAraXzfiLOj7+daO3OT1S2jqkJ3m12KX2oYpLLwpMPRgwmEgfM0= ceph-1a50eb6e-1c40-11f1-854f-9d3053100916' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-10T05:16:10.702 INFO:teuthology.orchestra.run.vm00.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDISxM2CzCCFE6Tt4JOwCT0uhP6j+lomEzG1U869a/ZsH4tCone9Fap+x0RyS1VB3r2dg9czxDsAj3AgcofNbmJWetAsEJlMyqSQ1I0TM/21jmfCV2ctJ6MPAJGx8ptJ8s//eclc8dIw8UskdyMeDfF61a2SlklIDUfvhZokJVQqt0nj82xDwa0BFcZvMWHFbaE5f+hx93j/9KT+3vSV/oeh9b4Sld7wri1YEER2duwRJBvvCjXFO1MiQNWAhOpr8aWY7QgBQtMMOU++r5CJcC9uAFCdwGechxLCo+zH/ljeevNH26lI8AIOVh4rER0TlGnP9sLJ8aujjkC654B2C/9AvPd0xeKC/TepZ6ELYlCHBAWMeqDOgdUumoSH5eMbaKGud0SV17lIyVkpqpA3hHcdy3keLEIDlSzWaovqvRDMRdXcFjCfmHx8heDEf6PvlagxJT5Ifi3h7gvjAraXzfiLOj7+daO3OT1S2jqkJ3m12KX2oYpLLwpMPRgwmEgfM0= ceph-1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:16:10.724 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDISxM2CzCCFE6Tt4JOwCT0uhP6j+lomEzG1U869a/ZsH4tCone9Fap+x0RyS1VB3r2dg9czxDsAj3AgcofNbmJWetAsEJlMyqSQ1I0TM/21jmfCV2ctJ6MPAJGx8ptJ8s//eclc8dIw8UskdyMeDfF61a2SlklIDUfvhZokJVQqt0nj82xDwa0BFcZvMWHFbaE5f+hx93j/9KT+3vSV/oeh9b4Sld7wri1YEER2duwRJBvvCjXFO1MiQNWAhOpr8aWY7QgBQtMMOU++r5CJcC9uAFCdwGechxLCo+zH/ljeevNH26lI8AIOVh4rER0TlGnP9sLJ8aujjkC654B2C/9AvPd0xeKC/TepZ6ELYlCHBAWMeqDOgdUumoSH5eMbaKGud0SV17lIyVkpqpA3hHcdy3keLEIDlSzWaovqvRDMRdXcFjCfmHx8heDEf6PvlagxJT5Ifi3h7gvjAraXzfiLOj7+daO3OT1S2jqkJ3m12KX2oYpLLwpMPRgwmEgfM0= ceph-1a50eb6e-1c40-11f1-854f-9d3053100916' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-10T05:16:10.763 INFO:teuthology.orchestra.run.vm03.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDISxM2CzCCFE6Tt4JOwCT0uhP6j+lomEzG1U869a/ZsH4tCone9Fap+x0RyS1VB3r2dg9czxDsAj3AgcofNbmJWetAsEJlMyqSQ1I0TM/21jmfCV2ctJ6MPAJGx8ptJ8s//eclc8dIw8UskdyMeDfF61a2SlklIDUfvhZokJVQqt0nj82xDwa0BFcZvMWHFbaE5f+hx93j/9KT+3vSV/oeh9b4Sld7wri1YEER2duwRJBvvCjXFO1MiQNWAhOpr8aWY7QgBQtMMOU++r5CJcC9uAFCdwGechxLCo+zH/ljeevNH26lI8AIOVh4rER0TlGnP9sLJ8aujjkC654B2C/9AvPd0xeKC/TepZ6ELYlCHBAWMeqDOgdUumoSH5eMbaKGud0SV17lIyVkpqpA3hHcdy3keLEIDlSzWaovqvRDMRdXcFjCfmHx8heDEf6PvlagxJT5Ifi3h7gvjAraXzfiLOj7+daO3OT1S2jqkJ3m12KX2oYpLLwpMPRgwmEgfM0= ceph-1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:16:10.776 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-03-10T05:16:11.790 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-03-10T05:16:11.790 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-03-10T05:16:12.682 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm03
2026-03-10T05:16:12.683 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T05:16:12.683 DEBUG:teuthology.orchestra.run.vm03:> dd of=/etc/ceph/ceph.conf
2026-03-10T05:16:12.699 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T05:16:12.699 DEBUG:teuthology.orchestra.run.vm03:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T05:16:12.755 INFO:tasks.cephadm:Adding host vm03 to orchestrator...
2026-03-10T05:16:12.755 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch host add vm03
2026-03-10T05:16:12.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:12 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1537396378' entity='client.admin'
192.168.123.100:0/1537396378' entity='client.admin' 2026-03-10T05:16:12.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:12 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:12.838 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:12 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:16:12.838 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:12 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:12.838 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:12 vm00 ceph-mon[49980]: Deploying daemon alertmanager.vm00 on vm00 2026-03-10T05:16:12.838 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:12 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:13.868 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:13 vm00 ceph-mon[49980]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:13.868 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:13 vm00 ceph-mon[49980]: from='client.14188 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:13.947 INFO:teuthology.orchestra.run.vm00.stdout:Added host 'vm03' with addr '192.168.123.103' 2026-03-10T05:16:14.005 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch host ls --format=json 2026-03-10T05:16:14.633 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:16:14.633 INFO:teuthology.orchestra.run.vm00.stdout:[{"addr": "192.168.123.100", "hostname": "vm00", "labels": [], "status": ""}, {"addr": "192.168.123.103", "hostname": "vm03", "labels": [], "status": ""}] 2026-03-10T05:16:14.841 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-10T05:16:14.842 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd crush tunables default 2026-03-10T05:16:14.877 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:14 vm00 ceph-mon[49980]: Deploying cephadm binary to vm03 2026-03-10T05:16:14.877 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:14 vm00 ceph-mon[49980]: mgrmap e13: vm00.vnepyw(active, since 6s) 2026-03-10T05:16:14.877 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:14 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:14.877 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:14 vm00 ceph-mon[49980]: Added host vm03 2026-03-10T05:16:15.893 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:15 vm00 ceph-mon[49980]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:16:16.688 INFO:teuthology.orchestra.run.vm00.stderr:adjusted tunables profile to default 2026-03-10T05:16:16.803 INFO:tasks.cephadm:Adding mon.vm00 on vm00 
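The "ceph orch host ls --format=json" call above is how the task confirms the host add took effect: the returned JSON list must name both machines before bootstrap continues. A minimal sketch of that check in Python, assuming the same cephadm shell prefix visible in this log; the helper name is illustrative, not the actual tasks.cephadm code:

import json
import subprocess

# Command prefix copied from the cephadm shell invocations in this log.
CEPHADM = ["sudo", "/home/ubuntu/cephtest/cephadm",
           "--image", "quay.io/ceph/ceph:v17.2.0", "shell",
           "-c", "/etc/ceph/ceph.conf",
           "-k", "/etc/ceph/ceph.client.admin.keyring",
           "--fsid", "1a50eb6e-1c40-11f1-854f-9d3053100916", "--"]

def orch_hosts():
    # "ceph orch host ls --format=json" prints a list of objects like
    # {"addr": ..., "hostname": ..., "labels": [...], "status": ...},
    # exactly as in the stdout record above.
    out = subprocess.check_output(CEPHADM + ["ceph", "orch", "host", "ls", "--format=json"])
    return {h["hostname"]: h["addr"] for h in json.loads(out)}

hosts = orch_hosts()
assert "vm00" in hosts and "vm03" in hosts, "orch host add has not taken effect yet"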
2026-03-10T05:16:16.803 INFO:tasks.cephadm:Adding mon.vm03 on vm03
2026-03-10T05:16:16.803 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch apply mon '2;vm00:192.168.123.100=vm00;vm03:192.168.123.103=vm03'
2026-03-10T05:16:16.903 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:16 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1999000823' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-10T05:16:16.903 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:16 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:16.903 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:16 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T05:16:16.903 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:16 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-10T05:16:16.903 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:16 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:16:17.310 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mon update...
2026-03-10T05:16:17.378 DEBUG:teuthology.orchestra.run.vm03:mon.vm03> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@mon.vm03.service
2026-03-10T05:16:17.379 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:17.379 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:17.904 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:17.904 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:17.908 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: Deploying daemon crash.vm00 on vm00
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1999000823' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='client.14194 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm00:192.168.123.100=vm00;vm03:192.168.123.103=vm03", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: Saving service mon spec with placement vm00:192.168.123.100=vm00;vm03:192.168.123.103=vm03;count:2
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T05:16:17.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:17 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:18.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:18 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T05:16:18.928 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:18 vm00 ceph-mon[49980]: Deploying daemon grafana.vm00 on vm00
2026-03-10T05:16:18.928 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:18 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1587524225' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:18.977 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
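The "Waiting for 2 mons in monmap..." records that follow are a simple poll: re-run "ceph mon dump -f json" and parse the monmap until its "mons" array reaches the requested size. A sketch of that loop under the same assumptions as the previous sketch (CEPHADM is the cephadm shell prefix defined there; the timeout and interval values are illustrative, not the actual tasks.cephadm settings):

import json
import subprocess
import time

def wait_for_mons(want, timeout=300, interval=1.5):
    # Poll "ceph mon dump -f json" until the monmap lists `want` monitors,
    # mirroring the repeated "Waiting for 2 mons in monmap..." records here.
    monmap = {"mons": []}
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.check_output(CEPHADM + ["ceph", "mon", "dump", "-f", "json"])
        monmap = json.loads(out)
        if len(monmap["mons"]) >= want:
            return monmap
        time.sleep(interval)
    raise TimeoutError("monmap still has %d mons, wanted %d"
                       % (len(monmap["mons"]), want))

monmap = wait_for_mons(2)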
2026-03-10T05:16:18.977 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:19.541 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:19.541 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:19.544 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:20.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:20 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/75310320' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:20.591 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:20.591 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:21.572 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:21.572 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:21.575 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:22.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:21 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1534882922' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:22.644 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:22.644 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:23.133 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:23.134 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:23.136 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:23.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:23 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:23.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:23 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/3194640308' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:24.181 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:24.181 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:24.694 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:24.695 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:24.696 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:25.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:25 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/4290776118' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:25.763 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
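Each iteration above returns the same epoch-1 monmap, so the useful fields are easy to pick out: the mon names, the quorum ranks, and the v2/v1 addresses. Note the quincy-era quirk preserved verbatim in these dumps, the key is literally "disallowed_leaders: ", colon and space included. A small parsing sketch (field names taken from the JSON above; the rank-equals-index assumption holds for these dumps):

def summarize_monmap(dump):
    # `dump` is the parsed output of "ceph mon dump -f json" as shown above.
    names = [m["name"] for m in dump["mons"]]
    # "quorum" holds ranks; in these dumps rank == list index.
    quorum = [dump["mons"][r]["name"] for r in dump["quorum"]]
    v2_addrs = {m["name"]: a["addr"]
                for m in dump["mons"]
                for a in m["public_addrs"]["addrvec"] if a["type"] == "v2"}
    return names, quorum, v2_addrs

# For the epoch-1 dump above this yields (["vm00"], ["vm00"],
# {"vm00": "192.168.123.100:3300"}), i.e. still one mon in quorum.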
2026-03-10T05:16:25.763 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:26.256 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:26.257 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:26.258 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:26.906 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:26 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/3441432120' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:27.325 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:27.325 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:27.853 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:27.853 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:27.855 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:28.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:28 vm00 ceph-mon[49980]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T05:16:28.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:28 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:28.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:28 vm00 ceph-mon[49980]: Deploying daemon node-exporter.vm00 on vm00
2026-03-10T05:16:28.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:28 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1042397411' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:28.901 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:28.901 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:29.406 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:29.406 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:29.408 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:30.477 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:30.477 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:30.524 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:30 vm00 ceph-mon[49980]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T05:16:30.524 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:30 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1159350567' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:30.524 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:30 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:30.524 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:30 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch
2026-03-10T05:16:30.996 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:30.996 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:30.998 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:32.040 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:32.040 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:32.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:32 vm00 ceph-mon[49980]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-03-10T05:16:32.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:32 vm00 ceph-mon[49980]: mgrmap e14: vm00.vnepyw(active, since 23s)
2026-03-10T05:16:32.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:32 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1465344203' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:32.575 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:32.576 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:32.578 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:33.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:33 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/4283421850' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:33.651 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:33.651 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:34.125 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:34.125 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:34.128 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:35.195 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
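Interleaved with the poll, the mgr finishes "mgr module enable prometheus" (dispatch, then finished, then mgrmap e14 above). Enabling a module and confirming it took is the same two-step pattern; a sketch reusing the CEPHADM prefix from the first sketch, with "enabled_modules" being the relevant key of "ceph mgr module ls --format=json":

import json
import subprocess

def enable_mgr_module(name):
    # Issue the enable, then confirm the module shows up as enabled; the
    # mon log above shows the dispatch/finished pair for "prometheus".
    subprocess.check_call(CEPHADM + ["ceph", "mgr", "module", "enable", name])
    out = subprocess.check_output(CEPHADM + ["ceph", "mgr", "module", "ls", "--format=json"])
    mods = json.loads(out)
    assert name in mods.get("enabled_modules", []), name + " did not enable"

enable_mgr_module("prometheus")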
2026-03-10T05:16:35.196 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:35.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:35 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/2787234426' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:35.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:35 vm00 ceph-mon[49980]: Active manager daemon vm00.vnepyw restarted
2026-03-10T05:16:35.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:35 vm00 ceph-mon[49980]: Activating manager daemon vm00.vnepyw
2026-03-10T05:16:35.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:35 vm00 ceph-mon[49980]: osdmap e5: 0 total, 0 up, 0 in
2026-03-10T05:16:35.787 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:35.787 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:35.788 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: mgrmap e15: vm00.vnepyw(active, starting, since 0.0153619s)
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: Manager daemon vm00.vnepyw is now available
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: [10/Mar/2026:05:16:35] ENGINE Bus STARTING
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/10933340' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: [10/Mar/2026:05:16:35] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T05:16:36.164 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: [10/Mar/2026:05:16:35] ENGINE Bus STARTED
2026-03-10T05:16:36.165 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:36.165 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:36.846 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
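The repeated "config generate-minimal-conf" dispatches above are cephadm regenerating the stripped-down ceph.conf (essentially a [global] section with fsid and mon_host) that it then pushes to /etc/ceph/ceph.conf on each managed host, as the "Updating vm00:/etc/ceph/ceph.conf" records further down show. Fetching and installing it by hand would look roughly like this, a sketch under the same CEPHADM assumption as the earlier sketches, with the path being the one used throughout this log:

import subprocess

def write_minimal_conf(path="/etc/ceph/ceph.conf"):
    # "ceph config generate-minimal-conf" prints the minimal client config;
    # cephadm distributes exactly this file to managed hosts.
    conf = subprocess.check_output(CEPHADM + ["ceph", "config", "generate-minimal-conf"])
    with open(path, "wb") as f:
        f.write(conf)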
2026-03-10T05:16:36.846 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:37.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: mgrmap e16: vm00.vnepyw(active, since 1.02482s)
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:37.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:37.434 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:37.434 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:37.445 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:38.505 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:38.505 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:38.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:38 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/24861417' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:38.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:38 vm00 ceph-mon[49980]: mgrmap e17: vm00.vnepyw(active, since 2s)
2026-03-10T05:16:39.458 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:39.458 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:39.466 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:39.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:39.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:39.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:16:39.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:39.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T05:16:39.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-10T05:16:39.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:16:39.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:39 vm00 ceph-mon[49980]: Deploying daemon crash.vm03 on vm03
2026-03-10T05:16:40.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/2669188674' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.vqfmrv", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm03.vqfmrv", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: Deploying daemon mgr.vm03.vqfmrv on vm03
2026-03-10T05:16:40.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:40.548 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:40.548 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:41.511 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:16:41.511 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:15:34.587958Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T05:16:41.517 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1
2026-03-10T05:16:41.741 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: starting mon.vm03 rank -1 at public addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] at bind addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon_data /var/lib/ceph/mon/ceph-vm03 fsid 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:16:41.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:41.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T05:16:41.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:16:41.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:41 vm00 ceph-mon[49980]: Deploying daemon mon.vm03 on vm03
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(???) e0 preinit fsid 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).mds e1 new map
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).mds e1 print_map
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout: e1
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout: legacy client fscid: -1
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout: No filesystems configured
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-10T05:16:42.015 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mkfs 1a50eb6e-1c40-11f1-854f-9d3053100916
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0)
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0)
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: monmap e1: 1 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]}
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: fsmap
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: osdmap e1: 0 total, 0 up, 0 in
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e1: no daemons active
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2935347550' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3986681299' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3986681299' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2392068062' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0)
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: monmap e1: 1 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]}
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: fsmap
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: osdmap e1: 0 total, 0 up, 0 in
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e1: no daemons active
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/445991605' entity='client.admin'
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1579308259' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3191018435' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Activating manager daemon vm00.vnepyw
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e2: vm00.vnepyw(active, starting, since 0.0820428s)
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Manager daemon vm00.vnepyw is now available
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14100 192.168.123.100:0/3512525413' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.016 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/714162547' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e3: vm00.vnepyw(active, since 1.08539s)
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e4: vm00.vnepyw(active, since 2s)
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/4138959648' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/4030771869' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1972491054' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1972491054' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e5: vm00.vnepyw(active, since 4s)
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2390471054' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Active manager daemon vm00.vnepyw restarted
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Activating manager daemon vm00.vnepyw
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: osdmap e2: 0 total, 0 up, 0 in
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e6: vm00.vnepyw(active, starting, since 0.0999647s)
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Manager daemon vm00.vnepyw is now available
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e7: vm00.vnepyw(active, since 1.10904s)
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:42.017 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:15:55] ENGINE Bus STARTING
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:15:55] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:15:55] ENGINE Bus STARTED
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Generating ssh key...
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e8: vm00.vnepyw(active, since 2s)
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying cephadm binary to vm00
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Added host vm00
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service mon spec with placement count:5
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service mgr spec with placement count:2
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service crash spec with placement *
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service prometheus spec with placement count:1
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service grafana spec with placement count:1
2026-03-10T05:16:42.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service node-exporter spec with placement *
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/619573764' entity='client.admin'
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service alertmanager spec with placement count:1
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3347018677' entity='client.admin'
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14120 192.168.123.100:0/814639140' entity='mgr.vm00.vnepyw'
2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.?
192.168.123.100:0/2567113296' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2567113296' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e9: vm00.vnepyw(active, since 8s) 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3084428704' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Active manager daemon vm00.vnepyw restarted 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Activating manager daemon vm00.vnepyw 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: osdmap e3: 0 total, 0 up, 0 in 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e10: vm00.vnepyw(active, starting, since 0.055285s) 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Manager daemon vm00.vnepyw is now available 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:16:07] ENGINE Bus STARTING 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:16:07] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:16:07] ENGINE Bus STARTED 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e11: vm00.vnepyw(active, since 1.06182s) 2026-03-10T05:16:42.019 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/471971931' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e12: vm00.vnepyw(active, since 2s) 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/738524834' entity='client.admin' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/1537396378' entity='client.admin' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon alertmanager.vm00 on vm00 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14188 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying cephadm binary to vm03 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e13: vm00.vnepyw(active, since 6s) 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Added host vm03 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/1999000823' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon crash.vm00 on vm00 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1999000823' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.14194 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm00:192.168.123.100=vm00;vm03:192.168.123.103=vm03", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Saving service mon spec with placement vm00:192.168.123.100=vm00;vm03:192.168.123.103=vm03;count:2 2026-03-10T05:16:42.020 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon grafana.vm00 on vm00 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1587524225' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/75310320' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1534882922' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/3194640308' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/4290776118' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/3441432120' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon node-exporter.vm00 on vm00 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1042397411' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 
192.168.123.103:0/1159350567' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14162 192.168.123.100:0/475496678' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e14: vm00.vnepyw(active, since 23s) 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1465344203' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/4283421850' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/2787234426' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Active manager daemon vm00.vnepyw restarted 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Activating manager daemon vm00.vnepyw 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: osdmap e5: 0 total, 0 up, 0 in 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e15: vm00.vnepyw(active, starting, since 0.0153619s) 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm00.vnepyw", "id": "vm00.vnepyw"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Manager daemon vm00.vnepyw is now available 2026-03-10T05:16:42.021 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:16:42.021 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:16:35] ENGINE Bus STARTING 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 
192.168.123.103:0/10933340' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:16:35] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: [10/Mar/2026:05:16:35] ENGINE Bus STARTED 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e16: vm00.vnepyw(active, since 1.02482s) 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 
192.168.123.103:0/24861417' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mgrmap e17: vm00.vnepyw(active, since 2s) 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon crash.vm03 on vm03 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='client.? 
192.168.123.103:0/2669188674' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.vqfmrv", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm03.vqfmrv", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon mgr.vm03.vqfmrv on vm03 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: Deploying daemon mon.vm03 on vm03 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing).paxosservice(auth 1..6) refresh upgraded, format 0 -> 3 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expand map: {default=false} 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta from 'false' to 'false' 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expanded map: {default=false} 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expand map: {default=info} 2026-03-10T05:16:42.022 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta from 'info' to 'info' 2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expanded map: 
{default=info}
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expand map: {default=daemon}
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta from 'daemon' to 'daemon'
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expanded map: {default=daemon}
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expand map: {default=debug}
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta from 'debug' to 'debug'
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: expand_channel_meta expanded map: {default=debug}
2026-03-10T05:16:42.023 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:41 vm03 ceph-mon[50983]: mon.vm03@-1(synchronizing) e1 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file
2026-03-10T05:16:42.588 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T05:16:42.588 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mon dump -f json
2026-03-10T05:16:47.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: Deploying daemon node-exporter.vm03 on vm03
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: mon.vm00 calling monitor election
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: mon.vm03 calling monitor election
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.? 192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm03.vqfmrv/crt"}]: dispatch
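The `tasks.cephadm:Waiting for 2 mons in monmap` step above is a simple poll: run `ceph mon dump -f json` inside a cephadm shell and check how many mons the monmap reports. A minimal stand-alone sketch of that wait, reusing the image, fsid, and 2-mon target from this run; `jq` is an assumption here (the harness itself parses the dump in Python rather than shelling out to jq):

    # Poll the monmap until it reports 2 mons (sketch; values from this run).
    want=2
    while true; do
        n=$(sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 \
                shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
                --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- \
                ceph mon dump -f json 2>/dev/null | jq '.mons | length')
        [ "${n:-0}" -ge "$want" ] && break
        sleep 5
    done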
192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm03.vqfmrv/crt"}]: dispatch 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: mon.vm00 is new leader, mons vm00,vm03 in quorum (ranks 0,1) 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: monmap e2: 2 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],vm03=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0]} 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: fsmap 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: osdmap e5: 0 total, 0 up, 0 in 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: mgrmap e17: vm00.vnepyw(active, since 11s) 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: Standby manager daemon vm03.vqfmrv started 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.? 192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: overall HEALTH_OK 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.? 192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm03.vqfmrv/key"}]: dispatch 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.? 
192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:47.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: Deploying daemon node-exporter.vm03 on vm03 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: mon.vm00 calling monitor election 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: mon.vm03 calling monitor election 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.? 
192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm03.vqfmrv/crt"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: mon.vm00 is new leader, mons vm00,vm03 in quorum (ranks 0,1) 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: monmap e2: 2 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],vm03=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0]} 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: fsmap 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: osdmap e5: 0 total, 0 up, 0 in 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: mgrmap e17: vm00.vnepyw(active, since 11s) 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: Standby manager daemon vm03.vqfmrv started 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.? 192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: overall HEALTH_OK 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.? 192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm03.vqfmrv/key"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.? 
192.168.123.103:0/3848119366' entity='mgr.vm03.vqfmrv' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:47.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:47.534 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T05:16:47.535 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":2,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","modified":"2026-03-10T05:16:41.789117Z","created":"2026-03-10T05:15:34.587958Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T05:16:47.537 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 2 2026-03-10T05:16:47.583 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-10T05:16:47.583 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph config generate-minimal-conf 2026-03-10T05:16:48.030 INFO:teuthology.orchestra.run.vm00.stdout:# minimal ceph.conf for 1a50eb6e-1c40-11f1-854f-9d3053100916 2026-03-10T05:16:48.031 INFO:teuthology.orchestra.run.vm00.stdout:[global] 2026-03-10T05:16:48.031 INFO:teuthology.orchestra.run.vm00.stdout: fsid = 1a50eb6e-1c40-11f1-854f-9d3053100916 2026-03-10T05:16:48.031 INFO:teuthology.orchestra.run.vm00.stdout: mon_host = [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] 2026-03-10T05:16:48.093 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-10T05:16:48.093 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T05:16:48.093 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T05:16:48.117 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:47 vm00 ceph-mon[49980]: Deploying daemon prometheus.vm00 on vm00 2026-03-10T05:16:48.117 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:47 vm00 ceph-mon[49980]: mgrmap e18: vm00.vnepyw(active, since 11s), standbys: vm03.vqfmrv 2026-03-10T05:16:48.117 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm03.vqfmrv", "id": "vm03.vqfmrv"}]: dispatch 2026-03-10T05:16:48.117 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:47 vm00 ceph-mon[49980]: from='client.? 
2026-03-10T05:16:48.117 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:16:48.122 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:16:48.122 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T05:16:48.187 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T05:16:48.187 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T05:16:48.210 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:47 vm03 ceph-mon[50983]: Deploying daemon prometheus.vm00 on vm00
2026-03-10T05:16:48.210 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:47 vm03 ceph-mon[50983]: mgrmap e18: vm00.vnepyw(active, since 11s), standbys: vm03.vqfmrv
2026-03-10T05:16:48.210 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr metadata", "who": "vm03.vqfmrv", "id": "vm03.vqfmrv"}]: dispatch
2026-03-10T05:16:48.210 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:47 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/2431617114' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T05:16:48.210 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:16:48.214 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T05:16:48.214 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T05:16:48.277 INFO:tasks.cephadm:Deploying OSDs...
2026-03-10T05:16:48.277 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T05:16:48.277 DEBUG:teuthology.orchestra.run.vm00:> dd if=/scratch_devs of=/dev/stdout
2026-03-10T05:16:48.293 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T05:16:48.294 DEBUG:teuthology.orchestra.run.vm00:> ls /dev/[sv]d?
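Scratch-device discovery above is a two-step fallback: try to read an explicit device list from /scratch_devs (the `dd` exits 1 here because the file does not exist on these VMs), then glob /dev/[sv]d? and drop the root device, as the `Removing root device: /dev/vda` warning just below shows. Condensed into a sketch; resolving the root disk via findmnt is an assumption (teuthology inspects the mount table itself), and it presumes the root filesystem sits on a partition of the boot disk:

    # Find usable scratch devices: explicit list first, glob as fallback (sketch).
    if dd if=/scratch_devs of=/dev/stdout 2>/dev/null; then
        :  # /scratch_devs already printed the device list
    else
        root_disk=$(findmnt -n -o SOURCE / | sed 's/[0-9]*$//')
        ls /dev/[sv]d? | grep -v "^${root_disk}$"
    fi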
2026-03-10T05:16:48.351 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vda 2026-03-10T05:16:48.352 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdb 2026-03-10T05:16:48.352 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdc 2026-03-10T05:16:48.352 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdd 2026-03-10T05:16:48.352 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vde 2026-03-10T05:16:48.352 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T05:16:48.352 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T05:16:48.352 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdb 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdb 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 05:16:11.911048611 +0000 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 05:16:11.574047738 +0000 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 05:16:11.574047738 +0000 2026-03-10T05:16:48.412 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 05:11:05.272000000 +0000 2026-03-10T05:16:48.412 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T05:16:48.474 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T05:16:48.474 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T05:16:48.474 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000172593 s, 3.0 MB/s 2026-03-10T05:16:48.475 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T05:16:48.530 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdc 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdc 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 05:16:11.982048795 +0000 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 05:16:11.567047720 +0000 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 05:16:11.567047720 +0000 2026-03-10T05:16:48.588 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 05:11:05.279000000 +0000 2026-03-10T05:16:48.588 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T05:16:48.651 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T05:16:48.652 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T05:16:48.652 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000185458 s, 2.8 MB/s 2026-03-10T05:16:48.652 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T05:16:48.710 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdd 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdd 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 05:16:12.100049101 +0000 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 05:16:11.568047723 +0000 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 05:16:11.568047723 +0000 2026-03-10T05:16:48.777 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 05:11:05.286000000 +0000 2026-03-10T05:16:48.777 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T05:16:48.986 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T05:16:48.987 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T05:16:48.987 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.00059106 s, 866 kB/s 2026-03-10T05:16:48.989 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T05:16:49.088 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vde 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vde 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 05:16:12.214049396 +0000 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 05:16:11.569047725 +0000 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 05:16:11.569047725 +0000 2026-03-10T05:16:49.191 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 05:11:05.347000000 +0000 2026-03-10T05:16:49.191 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T05:16:49.226 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:48 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2463592171' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:49.229 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T05:16:49.229 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T05:16:49.229 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000112472 s, 4.6 MB/s 2026-03-10T05:16:49.230 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T05:16:49.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:48 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2463592171' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:49.320 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T05:16:49.320 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T05:16:49.334 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T05:16:49.334 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d? 
2026-03-10T05:16:49.388 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda 2026-03-10T05:16:49.389 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb 2026-03-10T05:16:49.389 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc 2026-03-10T05:16:49.389 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd 2026-03-10T05:16:49.389 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde 2026-03-10T05:16:49.389 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T05:16:49.389 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T05:16:49.389 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb 2026-03-10T05:16:49.445 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb 2026-03-10T05:16:49.445 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:49.445 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-10T05:16:49.445 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:49.445 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:49.445 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 05:16:37.889666438 +0000 2026-03-10T05:16:49.446 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 05:16:37.583663440 +0000 2026-03-10T05:16:49.446 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 05:16:37.583663440 +0000 2026-03-10T05:16:49.446 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 05:11:29.275000000 +0000 2026-03-10T05:16:49.446 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T05:16:49.508 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T05:16:49.508 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T05:16:49.508 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000145985 s, 3.5 MB/s 2026-03-10T05:16:49.509 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T05:16:49.566 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 05:16:37.964667173 +0000 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 05:16:37.583663440 +0000 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 05:16:37.583663440 +0000 2026-03-10T05:16:49.627 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 05:11:29.279000000 +0000 2026-03-10T05:16:49.627 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T05:16:49.694 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T05:16:49.694 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T05:16:49.694 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000205557 s, 2.5 MB/s 2026-03-10T05:16:49.695 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T05:16:49.751 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 05:16:38.039667908 +0000 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 05:16:37.590663509 +0000 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 05:16:37.590663509 +0000 2026-03-10T05:16:49.811 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 05:11:29.284000000 +0000 2026-03-10T05:16:49.811 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T05:16:49.875 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T05:16:49.875 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T05:16:49.875 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000163927 s, 3.1 MB/s 2026-03-10T05:16:49.876 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T05:16:49.934 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde 2026-03-10T05:16:49.990 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde 2026-03-10T05:16:49.990 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T05:16:49.990 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T05:16:49.990 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T05:16:49.991 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T05:16:49.991 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 05:16:38.110668603 +0000 2026-03-10T05:16:49.991 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 05:16:37.591663519 +0000 2026-03-10T05:16:49.991 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 05:16:37.591663519 +0000 2026-03-10T05:16:49.991 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 05:11:29.336000000 +0000 2026-03-10T05:16:49.991 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T05:16:50.053 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T05:16:50.053 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T05:16:50.053 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000187292 s, 2.7 MB/s 2026-03-10T05:16:50.054 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T05:16:50.110 INFO:tasks.cephadm:Deploying osd.0 on vm00 with /dev/vde... 2026-03-10T05:16:50.110 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vde 2026-03-10T05:16:51.730 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:16:51.744 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm00:/dev/vde 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 
2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T05:16:52.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T05:16:52.567 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='client.14246 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.533 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T05:16:53.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:53.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='client.14246 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T05:16:53.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:54.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: Reconfiguring mon.vm00 (unknown last config 
time)... 2026-03-10T05:16:54.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: Reconfiguring daemon mon.vm00 on vm00 2026-03-10T05:16:54.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/192829733' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "44f45ef3-82a5-4b34-87a0-62d9f3779646"}]: dispatch 2026-03-10T05:16:54.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/192829733' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "44f45ef3-82a5-4b34-87a0-62d9f3779646"}]': finished 2026-03-10T05:16:54.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T05:16:54.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:16:54.783 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:54 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/554340462' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:16:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: Reconfiguring mon.vm00 (unknown last config time)... 2026-03-10T05:16:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: Reconfiguring daemon mon.vm00 on vm00 2026-03-10T05:16:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/192829733' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "44f45ef3-82a5-4b34-87a0-62d9f3779646"}]: dispatch 2026-03-10T05:16:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/192829733' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "44f45ef3-82a5-4b34-87a0-62d9f3779646"}]': finished 2026-03-10T05:16:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T05:16:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:16:54.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:54 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/554340462' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:16:56.701 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:56 vm00 ceph-mon[49980]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:16:56.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:56 vm03 ceph-mon[50983]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:16:57.629 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:57.629 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: Reconfiguring mgr.vm00.vnepyw (unknown last config time)... 
2026-03-10T05:16:57.629 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm00.vnepyw", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: Reconfiguring daemon mgr.vm00.vnepyw on vm00 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: Deploying daemon osd.0 on vm00 2026-03-10T05:16:57.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:57 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:57.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: Reconfiguring mgr.vm00.vnepyw (unknown last config time)... 
2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm00.vnepyw", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: Reconfiguring daemon mgr.vm00.vnepyw on vm00 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: Deploying daemon osd.0 on vm00 2026-03-10T05:16:57.809 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:57 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:58.724 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:58 vm00 ceph-mon[49980]: Reconfiguring alertmanager.vm00 (dependencies changed)... 2026-03-10T05:16:58.724 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:58 vm00 ceph-mon[49980]: Reconfiguring daemon alertmanager.vm00 on vm00 2026-03-10T05:16:58.724 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:58 vm00 ceph-mon[49980]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:16:58.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:58 vm03 ceph-mon[50983]: Reconfiguring alertmanager.vm00 (dependencies changed)... 2026-03-10T05:16:58.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:58 vm03 ceph-mon[50983]: Reconfiguring daemon alertmanager.vm00 on vm00 2026-03-10T05:16:58.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:58 vm03 ceph-mon[50983]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:16:59.717 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 0 on host 'vm00' 2026-03-10T05:16:59.774 DEBUG:teuthology.orchestra.run.vm00:osd.0> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.0.service 2026-03-10T05:16:59.775 INFO:tasks.cephadm:Deploying osd.1 on vm00 with /dev/vdd... 
2026-03-10T05:16:59.775 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vdd 2026-03-10T05:16:59.917 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:59.917 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:16:59.917 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T05:16:59.917 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:16:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:00.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:00.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:00.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T05:17:00.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:16:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: Reconfiguring crash.vm00 (monmap changed)... 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: Reconfiguring daemon crash.vm00 on vm00 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: Reconfiguring grafana.vm00 (dependencies changed)... 
2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: Reconfiguring daemon grafana.vm00 on vm00 2026-03-10T05:17:00.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:00 vm00 ceph-mon[49980]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T05:17:00.726 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 05:17:00 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-0[59591]: 2026-03-10T05:17:00.517+0000 7f618f4553c0 -1 osd.0 0 log_to_monitors true 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: Reconfiguring crash.vm00 (monmap changed)... 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: Reconfiguring daemon crash.vm00 on vm00 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: Reconfiguring grafana.vm00 (dependencies changed)... 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: Reconfiguring daemon grafana.vm00 on vm00 2026-03-10T05:17:01.011 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:00 vm03 ceph-mon[50983]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T05:17:01.369 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:17:01.383 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm00:/dev/vdd 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: Reconfiguring crash.vm03 (monmap changed)... 
2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: Reconfiguring daemon crash.vm03 on vm03 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: osdmap e7: 1 total, 0 up, 1 in 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.vqfmrv", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.059 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:02.059 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: Reconfiguring crash.vm03 (monmap changed)... 
2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: Reconfiguring daemon crash.vm03 on vm03 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: osdmap e7: 1 total, 0 up, 1 in 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.vqfmrv", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.190 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:02.190 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:02.533 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 05:17:02 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-0[59591]: 2026-03-10T05:17:02.233+0000 7f6185e58700 -1 osd.0 0 waiting for initial osdmap 2026-03-10T05:17:02.533 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 05:17:02 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-0[59591]: 2026-03-10T05:17:02.237+0000 7f6180ff0700 -1 osd.0 8 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: Reconfiguring mgr.vm03.vqfmrv (monmap changed)... 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: Reconfiguring daemon mgr.vm03.vqfmrv on vm03 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: Reconfiguring mon.vm03 (monmap changed)... 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: Reconfiguring daemon mon.vm03 on vm03 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='client.14264 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: osdmap e8: 1 total, 0 up, 1 in 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/692120374' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1efabf27-c710-42d6-a867-b95e6b7de593"}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037] boot 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/692120374' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1efabf27-c710-42d6-a867-b95e6b7de593"}]': finished 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:03.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:02 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: Reconfiguring mgr.vm03.vqfmrv (monmap changed)... 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: Reconfiguring daemon mgr.vm03.vqfmrv on vm03 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: Reconfiguring mon.vm03 (monmap changed)... 
2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: Reconfiguring daemon mon.vm03 on vm03 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='client.14264 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: osdmap e8: 1 total, 0 up, 1 in 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/692120374' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1efabf27-c710-42d6-a867-b95e6b7de593"}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: osd.0 [v2:192.168.123.100:6802/698063037,v1:192.168.123.100:6803/698063037] boot 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/692120374' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1efabf27-c710-42d6-a867-b95e6b7de593"}]': finished 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T05:17:03.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:02 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:04.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:03 vm00 ceph-mon[49980]: purged_snaps scrub starts 2026-03-10T05:17:04.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:03 vm00 ceph-mon[49980]: purged_snaps scrub ok 2026-03-10T05:17:04.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:03 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2095803984' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:04.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:03 vm03 ceph-mon[50983]: purged_snaps scrub starts 2026-03-10T05:17:04.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:03 vm03 ceph-mon[50983]: purged_snaps scrub ok 2026-03-10T05:17:04.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:03 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2095803984' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:05.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:04 vm00 ceph-mon[49980]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:17:05.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:04 vm00 ceph-mon[49980]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T05:17:05.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:04 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:05.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:04 vm03 ceph-mon[50983]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T05:17:05.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:04 vm03 ceph-mon[50983]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T05:17:05.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:04 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:07.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:06 vm00 ceph-mon[49980]: pgmap v13: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:07.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:06 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T05:17:07.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:06 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:07.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:06 vm03 ceph-mon[50983]: pgmap v13: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 
2026-03-10T05:17:07.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:06 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T05:17:07.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:06 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:08.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:08 vm00 ceph-mon[49980]: Deploying daemon osd.1 on vm00 2026-03-10T05:17:08.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:08 vm03 ceph-mon[50983]: Deploying daemon osd.1 on vm00 2026-03-10T05:17:08.871 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 1 on host 'vm00' 2026-03-10T05:17:08.938 DEBUG:teuthology.orchestra.run.vm00:osd.1> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.1.service 2026-03-10T05:17:08.942 INFO:tasks.cephadm:Deploying osd.2 on vm00 with /dev/vdc... 2026-03-10T05:17:08.942 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vdc 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: pgmap v14: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:09.083 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:09 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: pgmap v14: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:09.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:09 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:09.981 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 05:17:09 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-1[62434]: 2026-03-10T05:17:09.893+0000 7f475c2283c0 -1 osd.1 0 log_to_monitors true 2026-03-10T05:17:09.997 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:17:10.012 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm00:/dev/vdc 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: pgmap v15: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:10.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:10 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: pgmap v15: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: 
from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:10 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='client.24129 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: osdmap e11: 2 total, 1 up, 2 in 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1559804465' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b"}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='client.? 
192.168.123.100:0/1559804465' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b"}]': finished 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: osdmap e12: 3 total, 1 up, 3 in 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:11 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:11.907 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 05:17:11 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-1[62434]: 2026-03-10T05:17:11.543+0000 7f4752c2b700 -1 osd.1 0 waiting for initial osdmap 2026-03-10T05:17:11.907 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 05:17:11 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-1[62434]: 2026-03-10T05:17:11.565+0000 7f474cdc1700 -1 osd.1 12 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='client.24129 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: osdmap e11: 2 total, 1 up, 2 in 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1559804465' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b"}]: dispatch 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/1559804465' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b"}]': finished 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: osdmap e12: 3 total, 1 up, 3 in 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:12.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:11 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: purged_snaps scrub starts 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: purged_snaps scrub ok 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/3653819981' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593] boot 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:13.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:12 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: purged_snaps scrub starts 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: purged_snaps scrub ok 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/3653819981' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: osd.1 [v2:192.168.123.100:6810/3501474593,v1:192.168.123.100:6811/3501474593] boot 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T05:17:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:12 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:15.182 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:14 vm00 ceph-mon[49980]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:15.183 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:14 vm00 ceph-mon[49980]: osdmap e14: 3 total, 2 up, 3 in 2026-03-10T05:17:15.183 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:14 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:14 vm03 ceph-mon[50983]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T05:17:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:14 vm03 ceph-mon[50983]: osdmap e14: 3 total, 2 up, 3 in 2026-03-10T05:17:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:14 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: pgmap v22: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: Detected new or changed devices on vm00 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:16.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:16 vm00 ceph-mon[49980]: Deploying daemon osd.2 on vm00 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 
05:17:16 vm03 ceph-mon[50983]: pgmap v22: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: Detected new or changed devices on vm00 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:16.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:16 vm03 ceph-mon[50983]: Deploying daemon osd.2 on vm00 2026-03-10T05:17:18.260 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 2 on host 'vm00' 2026-03-10T05:17:18.313 DEBUG:teuthology.orchestra.run.vm00:osd.2> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.2.service 2026-03-10T05:17:18.314 INFO:tasks.cephadm:Deploying osd.3 on vm00 with /dev/vdb... 
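Each "Deploying osd.N on HOST with /dev/X..." line from tasks.cephadm corresponds to a two-step, per-device sequence that is visible verbatim in the surrounding DEBUG lines: the device is first wiped with ceph-volume lvm zap, then handed to the orchestrator with ceph orch daemon add osd, and the task waits for the "Created osd(s) N on host ..." stdout plus the mon's "osd.N ... boot" entry before moving to the next device. A standalone sketch of the same loop, with the fsid, image, and paths copied from this run (the loop itself, the device list, and the $(hostname -s) host argument are illustrative, not the harness's own code):

    FSID=1a50eb6e-1c40-11f1-854f-9d3053100916
    IMAGE=quay.io/ceph/ceph:v17.2.0
    CEPHADM=/home/ubuntu/cephtest/cephadm
    for dev in /dev/vdb /dev/vdc /dev/vdd; do
        # wipe any previous LVM/BlueStore signatures so the device can be reused
        sudo "$CEPHADM" --image "$IMAGE" ceph-volume \
            -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
            --fsid "$FSID" -- lvm zap "$dev"
        # ask the cephadm orchestrator to create and start an OSD on that device
        sudo "$CEPHADM" --image "$IMAGE" shell \
            -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
            --fsid "$FSID" -- ceph orch daemon add osd "$(hostname -s):$dev"
    done
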
2026-03-10T05:17:18.314 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vdb 2026-03-10T05:17:18.516 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: pgmap v23: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:18.517 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:18.517 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:18.517 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:18.517 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:18.517 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:18.517 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:18 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: pgmap v23: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:18.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:18 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:19.073 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:17:19.090 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm00:/dev/vdb 2026-03-10T05:17:19.170 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 05:17:18 vm00 
ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-2[65402]: 2026-03-10T05:17:18.936+0000 7f5c5d5403c0 -1 osd.2 0 log_to_monitors true 2026-03-10T05:17:20.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:19 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:20.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:19 vm00 ceph-mon[49980]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T05:17:20.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:19 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:20.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:19 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:20.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:19 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:20.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:19 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:20.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:19 vm03 ceph-mon[50983]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T05:17:20.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:19 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:20.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:19 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:20.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:19 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:20.802 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 05:17:20 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-2[65402]: 2026-03-10T05:17:20.680+0000 7f5c53f43700 -1 osd.2 0 waiting for initial osdmap 2026-03-10T05:17:20.802 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 05:17:20 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-2[65402]: 2026-03-10T05:17:20.686+0000 7f5c4e0d9700 -1 osd.2 16 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T05:17:20.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: pgmap v24: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:20.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='client.14300 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:20.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 
05:17:20 vm00 ceph-mon[49980]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T05:17:20.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: osdmap e15: 3 total, 2 up, 3 in 2026-03-10T05:17:20.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76f35679-51a1-4fac-bf59-17ca29626b1e"}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2384773754' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76f35679-51a1-4fac-bf59-17ca29626b1e"}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "76f35679-51a1-4fac-bf59-17ca29626b1e"}]': finished 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: osdmap e16: 4 total, 2 up, 4 in 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:20.909 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: pgmap v24: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='client.14300 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: osdmap e15: 3 total, 2 up, 3 in 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76f35679-51a1-4fac-bf59-17ca29626b1e"}]: dispatch 2026-03-10T05:17:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2384773754' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76f35679-51a1-4fac-bf59-17ca29626b1e"}]: dispatch 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "76f35679-51a1-4fac-bf59-17ca29626b1e"}]': finished 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: osdmap e16: 4 total, 2 up, 4 in 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:21.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:22.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:21 vm00 ceph-mon[49980]: from='client.? 
192.168.123.100:0/3729285351' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:22.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:21 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:22.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:21 vm00 ceph-mon[49980]: osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230] boot 2026-03-10T05:17:22.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:21 vm00 ceph-mon[49980]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T05:17:22.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:21 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:22.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:21 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:21 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3729285351' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:21 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:21 vm03 ceph-mon[50983]: osd.2 [v2:192.168.123.100:6818/3185081230,v1:192.168.123.100:6819/3185081230] boot 2026-03-10T05:17:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:21 vm03 ceph-mon[50983]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T05:17:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:21 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T05:17:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:21 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:23.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:22 vm00 ceph-mon[49980]: purged_snaps scrub starts 2026-03-10T05:17:23.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:22 vm00 ceph-mon[49980]: purged_snaps scrub ok 2026-03-10T05:17:23.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:22 vm00 ceph-mon[49980]: pgmap v27: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:22 vm03 ceph-mon[50983]: purged_snaps scrub starts 2026-03-10T05:17:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:22 vm03 ceph-mon[50983]: purged_snaps scrub ok 2026-03-10T05:17:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:22 vm03 ceph-mon[50983]: pgmap v27: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-10T05:17:24.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:23 vm00 ceph-mon[49980]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T05:17:24.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:24.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:23 vm00 ceph-mon[49980]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T05:17:24.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:23 vm03 ceph-mon[50983]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T05:17:24.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:24.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: pgmap v30: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: osdmap e19: 4 total, 3 up, 4 in 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:25.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: pgmap v30: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T05:17:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T05:17:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: osdmap e19: 4 total, 3 up, 4 in 2026-03-10T05:17:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T05:17:25.307 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:26.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:25 vm00 ceph-mon[49980]: Detected new or changed devices on vm00 2026-03-10T05:17:26.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T05:17:26.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:25 vm00 ceph-mon[49980]: osdmap e20: 4 total, 3 up, 4 in 2026-03-10T05:17:26.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:26.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T05:17:26.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:25 vm03 ceph-mon[50983]: Detected new or changed devices on vm00 2026-03-10T05:17:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T05:17:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:25 vm03 ceph-mon[50983]: osdmap e20: 4 total, 3 up, 4 in 2026-03-10T05:17:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T05:17:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:27.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: Deploying daemon osd.3 on vm00 2026-03-10T05:17:27.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: pgmap v33: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T05:17:27.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: osdmap e21: 4 total, 3 up, 4 in 2026-03-10T05:17:27.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd 
metadata", "id": 3}]: dispatch 2026-03-10T05:17:27.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:27.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:27.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:27.283 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: Deploying daemon osd.3 on vm00 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: pgmap v33: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: osdmap e21: 4 total, 3 up, 4 in 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:27.977 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 05:17:27 vm00 sudo[69200]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-10T05:17:27.977 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 05:17:27 vm00 sudo[69200]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T05:17:27.977 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 05:17:27 vm00 sudo[69200]: pam_unix(sudo:session): session closed for user root 2026-03-10T05:17:28.157 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 3 on host 'vm00' 2026-03-10T05:17:28.259 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69275]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-10T05:17:28.259 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69275]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T05:17:28.259 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69275]: pam_unix(sudo:session): session closed for user 
root
2026-03-10T05:17:28.261 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:28 vm00 ceph-mon[49980]: pgmap v35: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T05:17:28.263 DEBUG:teuthology.orchestra.run.vm00:osd.3> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.3.service
2026-03-10T05:17:28.264 INFO:tasks.cephadm:Deploying osd.4 on vm03 with /dev/vde...
2026-03-10T05:17:28.264 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vde
2026-03-10T05:17:28.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:28 vm03 ceph-mon[50983]: pgmap v35: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T05:17:28.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69455]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda
2026-03-10T05:17:28.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69455]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T05:17:28.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69455]: pam_unix(sudo:session): session closed for user root
2026-03-10T05:17:28.521 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69367]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc
2026-03-10T05:17:28.522 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69367]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T05:17:28.522 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 05:17:28 vm00 sudo[69367]: pam_unix(sudo:session): session closed for user root
2026-03-10T05:17:28.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:28 vm03 sudo[53282]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda
2026-03-10T05:17:28.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:28 vm03 sudo[53282]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T05:17:28.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:28 vm03 sudo[53282]: pam_unix(sudo:session): session closed for user root
2026-03-10T05:17:29.135 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:17:29.149 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm03:/dev/vde
2026-03-10T05:17:29.358 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 05:17:29 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-3[68932]: 2026-03-10T05:17:29.111+0000 7f8b7d7753c0 -1 osd.3 0 log_to_monitors true
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='osd.3 [v2:192.168.123.100:6826/2086552335,v1:192.168.123.100:6827/2086552335]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T05:17:29.626 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:29 vm03 ceph-mon[50983]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='osd.3 [v2:192.168.123.100:6826/2086552335,v1:192.168.123.100:6827/2086552335]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T05:17:29.683 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:29 vm00 ceph-mon[49980]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T05:17:30.606 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 05:17:30 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-3[68932]: 2026-03-10T05:17:30.474+0000 7f8b7597b700 -1 osd.3 0 waiting for initial osdmap
2026-03-10T05:17:30.606 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 05:17:30 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-3[68932]: 2026-03-10T05:17:30.480+0000 7f8b6fb11700 -1 osd.3 23 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: pgmap v36: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: osdmap e22: 4 total, 3 up, 4 in
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: mgrmap e19: vm00.vnepyw(active, since 54s), standbys: vm03.vqfmrv
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='osd.3 [v2:192.168.123.100:6826/2086552335,v1:192.168.123.100:6827/2086552335]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='client.24159 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T05:17:30.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:30 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: pgmap v36: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: osdmap e22: 4 total, 3 up, 4 in
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: mgrmap e19: vm00.vnepyw(active, since 54s), standbys: vm03.vqfmrv
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='osd.3 [v2:192.168.123.100:6826/2086552335,v1:192.168.123.100:6827/2086552335]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='client.24159 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T05:17:30.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:30 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: osdmap e23: 4 total, 3 up, 4 in
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4168843c-8fa2-44f0-b4ba-f52b27d6011b"}]: dispatch
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/2043014804' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4168843c-8fa2-44f0-b4ba-f52b27d6011b"}]: dispatch
2026-03-10T05:17:31.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: osd.3 [v2:192.168.123.100:6826/2086552335,v1:192.168.123.100:6827/2086552335] boot
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4168843c-8fa2-44f0-b4ba-f52b27d6011b"}]': finished
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: osdmap e24: 5 total, 4 up, 5 in
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:31.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:31 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/858964006' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: osdmap e23: 4 total, 3 up, 4 in
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4168843c-8fa2-44f0-b4ba-f52b27d6011b"}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/2043014804' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4168843c-8fa2-44f0-b4ba-f52b27d6011b"}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: osd.3 [v2:192.168.123.100:6826/2086552335,v1:192.168.123.100:6827/2086552335] boot
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4168843c-8fa2-44f0-b4ba-f52b27d6011b"}]': finished
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: osdmap e24: 5 total, 4 up, 5 in
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:31.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:31.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:31.808 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:31 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/858964006' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
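The run.vm03 DEBUG lines above show the two-step pattern tasks.cephadm repeats for each roleless OSD in this job: zap the device with ceph-volume, then hand the same device to the orchestrator. A minimal sketch of that per-device sequence, reusing the image, fsid, host, and devices exactly as they appear in this log (the loop construct itself is illustrative, not harness code):

# Sketch of the per-device deployment driven on vm03 above. IMAGE, FSID,
# paths, and flags are copied verbatim from the logged commands; only the
# loop is illustrative.
IMAGE=quay.io/ceph/ceph:v17.2.0
FSID=1a50eb6e-1c40-11f1-854f-9d3053100916
for dev in /dev/vde /dev/vdd /dev/vdc; do
  # wipe any prior LVM/BlueStore state on the device
  sudo /home/ubuntu/cephtest/cephadm --image $IMAGE ceph-volume \
    -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
    --fsid $FSID -- lvm zap $dev
  # then ask the orchestrator to create an OSD on it
  sudo /home/ubuntu/cephtest/cephadm --image $IMAGE shell \
    -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
    --fsid $FSID -- ceph orch daemon add osd vm03:$dev
done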
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: purged_snaps scrub starts
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: purged_snaps scrub ok
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: osdmap e25: 5 total, 4 up, 5 in
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:33.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: purged_snaps scrub starts
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: purged_snaps scrub ok
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: osdmap e25: 5 total, 4 up, 5 in
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:33.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:34.936 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:34 vm03 ceph-mon[50983]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail
2026-03-10T05:17:34.936 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T05:17:34.936 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:34.936 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:34 vm03 ceph-mon[50983]: Deploying daemon osd.4 on vm03
2026-03-10T05:17:35.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:34 vm00 ceph-mon[49980]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail
2026-03-10T05:17:35.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T05:17:35.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:35.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:34 vm00 ceph-mon[49980]: Deploying daemon osd.4 on vm03
2026-03-10T05:17:35.749 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:17:35.749 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:17:35.749 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:35.749 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:35.749 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:35.749 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:35.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:17:35.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:17:35.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:35.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:35.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:35.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:36.225 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 4 on host 'vm03'
2026-03-10T05:17:36.283 DEBUG:teuthology.orchestra.run.vm03:osd.4> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.4.service
2026-03-10T05:17:36.284 INFO:tasks.cephadm:Deploying osd.5 on vm03 with /dev/vdd...
2026-03-10T05:17:36.284 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vdd
2026-03-10T05:17:36.921 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:36 vm03 ceph-mon[50983]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.6 KiB/s rd, 46 KiB/s wr, 4 op/s
2026-03-10T05:17:36.921 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:36.922 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:36.922 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:36.985 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:17:37.002 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm03:/dev/vdd
2026-03-10T05:17:37.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:36 vm00 ceph-mon[49980]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.6 KiB/s rd, 46 KiB/s wr, 4 op/s
2026-03-10T05:17:37.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:37.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:37.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:37.922 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 05:17:37 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-4[54942]: 2026-03-10T05:17:37.883+0000 7f22a2a503c0 -1 osd.4 0 log_to_monitors true
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.4 KiB/s rd, 39 KiB/s wr, 3 op/s
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='osd.4 [v2:192.168.123.103:6800/93707725,v1:192.168.123.103:6801/93707725]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
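Each new OSD then registers itself with the mons exactly as the entries above and below show: client.bootstrap-osd allocates the id ("osd new"), and the booting daemon sets its device class and CRUSH weight ("osd crush set-device-class", then "osd crush create-or-move"). The weight is the device capacity expressed in TiB: the pgmap avail figure in this log grows by 20 GiB per added OSD (60 -> 80 -> 100 GiB), and 20/1024 rounds to the 0.0195 the mons record. A quick check of that arithmetic, assuming only the figures logged here:

# Recompute the CRUSH weight reported in the mon log ("weight":0.0195).
# The 20 GiB per OSD is inferred from the pgmap avail increments above.
echo "scale=4; 20/1024" | bc   # prints .0195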
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: Detected new or changed devices on vm03
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "246fb941-ada4-46ea-8a51-e5bc09a6eb19"}]: dispatch
2026-03-10T05:17:39.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:38 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1744530890' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "246fb941-ada4-46ea-8a51-e5bc09a6eb19"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.4 KiB/s rd, 39 KiB/s wr, 3 op/s
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='osd.4 [v2:192.168.123.103:6800/93707725,v1:192.168.123.103:6801/93707725]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: Detected new or changed devices on vm03
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "246fb941-ada4-46ea-8a51-e5bc09a6eb19"}]: dispatch
2026-03-10T05:17:39.308 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:38 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1744530890' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "246fb941-ada4-46ea-8a51-e5bc09a6eb19"}]: dispatch
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='client.24177 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "246fb941-ada4-46ea-8a51-e5bc09a6eb19"}]': finished
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: osdmap e26: 6 total, 4 up, 6 in
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='osd.4 [v2:192.168.123.103:6800/93707725,v1:192.168.123.103:6801/93707725]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:40.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:39 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/3502845530' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='client.24177 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "246fb941-ada4-46ea-8a51-e5bc09a6eb19"}]': finished
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: osdmap e26: 6 total, 4 up, 6 in
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='osd.4 [v2:192.168.123.103:6800/93707725,v1:192.168.123.103:6801/93707725]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:39 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/3502845530' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T05:17:40.307 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 05:17:39 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-4[54942]: 2026-03-10T05:17:39.881+0000 7f2299453700 -1 osd.4 0 waiting for initial osdmap
2026-03-10T05:17:40.307 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 05:17:39 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-4[54942]: 2026-03-10T05:17:39.888+0000 7f2294dec700 -1 osd.4 27 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.1 KiB/s rd, 33 KiB/s wr, 2 op/s
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: osdmap e27: 6 total, 4 up, 6 in
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: osd.4 [v2:192.168.123.103:6800/93707725,v1:192.168.123.103:6801/93707725] boot
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: osdmap e28: 6 total, 5 up, 6 in
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:41.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:40 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.1 KiB/s rd, 33 KiB/s wr, 2 op/s
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: osdmap e27: 6 total, 4 up, 6 in
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: osd.4 [v2:192.168.123.103:6800/93707725,v1:192.168.123.103:6801/93707725] boot
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: osdmap e28: 6 total, 5 up, 6 in
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T05:17:41.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:40 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:42.199 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:41 vm03 ceph-mon[50983]: purged_snaps scrub starts
2026-03-10T05:17:42.199 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:41 vm03 ceph-mon[50983]: purged_snaps scrub ok
2026-03-10T05:17:42.199 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:41 vm03 ceph-mon[50983]: osdmap e29: 6 total, 5 up, 6 in
2026-03-10T05:17:42.199 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:42.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:41 vm00 ceph-mon[49980]: purged_snaps scrub starts
2026-03-10T05:17:42.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:41 vm00 ceph-mon[49980]: purged_snaps scrub ok
2026-03-10T05:17:42.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:41 vm00 ceph-mon[49980]: osdmap e29: 6 total, 5 up, 6 in
2026-03-10T05:17:42.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:43.046 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:42 vm03 ceph-mon[50983]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail
2026-03-10T05:17:43.046 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-10T05:17:43.046 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:43.046 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:42 vm03 ceph-mon[50983]: osdmap e30: 6 total, 5 up, 6 in
2026-03-10T05:17:43.046 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:43.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:42 vm00 ceph-mon[49980]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail
2026-03-10T05:17:43.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-10T05:17:43.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:43.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:42 vm00 ceph-mon[49980]: osdmap e30: 6 total, 5 up, 6 in
2026-03-10T05:17:43.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:44.146 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:43 vm03 ceph-mon[50983]: Deploying daemon osd.5 on vm03
2026-03-10T05:17:44.146 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:44.146 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:44.146 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:44.146 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:44.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:43 vm00 ceph-mon[49980]: Deploying daemon osd.5 on vm03
2026-03-10T05:17:44.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:44.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:44.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:44.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:44.607 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 5 on host 'vm03'
2026-03-10T05:17:44.671 DEBUG:teuthology.orchestra.run.vm03:osd.5> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.5.service
2026-03-10T05:17:44.673 INFO:tasks.cephadm:Deploying osd.6 on vm03 with /dev/vdc...
2026-03-10T05:17:44.673 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vdc
2026-03-10T05:17:44.968 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:44 vm03 ceph-mon[50983]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail
2026-03-10T05:17:44.968 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:44 vm03 ceph-mon[50983]: from='osd.5 [v2:192.168.123.103:6808/1363755916,v1:192.168.123.103:6809/1363755916]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T05:17:44.968 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:44 vm03 ceph-mon[50983]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T05:17:44.968 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:44 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:44.968 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:44 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:45.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:44 vm00 ceph-mon[49980]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail
2026-03-10T05:17:45.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:44 vm00 ceph-mon[49980]: from='osd.5 [v2:192.168.123.103:6808/1363755916,v1:192.168.123.103:6809/1363755916]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T05:17:45.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:44 vm00 ceph-mon[49980]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T05:17:45.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:44 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:45.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:44 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:45.351 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T05:17:45.365 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm03:/dev/vdc
2026-03-10T05:17:46.178 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: pgmap v53: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%)
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: osdmap e31: 6 total, 5 up, 6 in
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='osd.5 [v2:192.168.123.103:6808/1363755916,v1:192.168.123.103:6809/1363755916]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='client.24195 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:46.179 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:46 vm03 ceph-mon[50983]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED)
2026-03-10T05:17:46.179 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 05:17:46 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-5[57795]: 2026-03-10T05:17:46.148+0000 7fe85ee1c700 -1 osd.5 0 waiting for initial osdmap
2026-03-10T05:17:46.179 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 05:17:46 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-5[57795]: 2026-03-10T05:17:46.157+0000 7fe8577af700 -1 osd.5 32 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: pgmap v53: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%)
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: osdmap e31: 6 total, 5 up, 6 in
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='osd.5 [v2:192.168.123.103:6808/1363755916,v1:192.168.123.103:6809/1363755916]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='client.24195 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:46.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:46 vm00 ceph-mon[49980]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED)
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: osdmap e32: 6 total, 5 up, 6 in
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: Detected new or changed devices on vm03
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1691430850' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd"}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd"}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: osd.5 [v2:192.168.123.103:6808/1363755916,v1:192.168.123.103:6809/1363755916] boot
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd"}]': finished
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: osdmap e33: 7 total, 6 up, 7 in
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:47.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:47 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: osdmap e32: 6 total, 5 up, 6 in
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: Detected new or changed devices on vm03
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1691430850' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd"}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd"}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: osd.5 [v2:192.168.123.103:6808/1363755916,v1:192.168.123.103:6809/1363755916] boot
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd"}]': finished
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: osdmap e33: 7 total, 6 up, 7 in
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T05:17:47.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:47 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T05:17:48.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:48 vm00 ceph-mon[49980]: purged_snaps scrub starts
2026-03-10T05:17:48.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:48 vm00 ceph-mon[49980]: purged_snaps scrub ok
2026-03-10T05:17:48.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:48 vm00 ceph-mon[49980]: pgmap v57: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%)
2026-03-10T05:17:48.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:48 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/3239220890' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T05:17:48.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:48 vm00 ceph-mon[49980]: osdmap e34: 7 total, 6 up, 7 in
2026-03-10T05:17:48.533 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:48 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T05:17:48.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:48 vm03 ceph-mon[50983]: purged_snaps scrub starts
2026-03-10T05:17:48.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:48 vm03 ceph-mon[50983]: purged_snaps scrub ok
2026-03-10T05:17:48.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:48 vm03 ceph-mon[50983]: pgmap v57: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%)
2026-03-10T05:17:48.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:48 vm03 ceph-mon[50983]: from='client.?
192.168.123.103:0/3239220890' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:48.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:48 vm03 ceph-mon[50983]: osdmap e34: 7 total, 6 up, 7 in 2026-03-10T05:17:48.558 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:48 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:50.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:49 vm00 ceph-mon[49980]: osdmap e35: 7 total, 6 up, 7 in 2026-03-10T05:17:50.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:49 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:50.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:49 vm03 ceph-mon[50983]: osdmap e35: 7 total, 6 up, 7 in 2026-03-10T05:17:50.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:49 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:50.939 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:50 vm03 ceph-mon[50983]: pgmap v60: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T05:17:50.940 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:50 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T05:17:50.940 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:50 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:50.940 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:50 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:51.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:50 vm00 ceph-mon[49980]: pgmap v60: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T05:17:51.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:50 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T05:17:51.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:50 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:51.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:50 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:51.883 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:51 vm03 ceph-mon[50983]: Deploying daemon osd.6 on vm03 2026-03-10T05:17:52.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:51 vm00 ceph-mon[49980]: Deploying daemon osd.6 on vm03 2026-03-10T05:17:52.887 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 6 on host 'vm03' 2026-03-10T05:17:52.982 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:52 vm03 ceph-mon[50983]: pgmap v61: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 2/6 objects misplaced (33.333%) 
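[annotation] The stretch of log that follows shows tasks.cephadm deploying the last OSDs on vm03 (zap the device with ceph-volume, then `ceph orch daemon add osd vm03:/dev/vdb`) and afterwards polling `ceph osd stat -f json` until all 8 OSDs report up ("Waiting for 8 OSDs to come up..."). Below is a minimal Python sketch of that deploy-and-wait pattern, under stated assumptions: the cephadm_shell()/deploy_osd()/wait_for_osds() helpers are illustrative names, not teuthology's actual API; only the command lines themselves are taken from the log.

    # Hypothetical sketch of the deploy-and-wait pattern visible in this log:
    # zap each device, hand it to the orchestrator, then poll
    # `ceph osd stat -f json` until the expected number of OSDs is up.
    import json
    import subprocess
    import time

    IMAGE = "quay.io/ceph/ceph:v17.2.0"
    FSID = "1a50eb6e-1c40-11f1-854f-9d3053100916"
    CEPHADM = "/home/ubuntu/cephtest/cephadm"

    def cephadm_shell(*cmd: str) -> str:
        """Run a command inside a cephadm shell container; return stdout."""
        out = subprocess.run(
            ["sudo", CEPHADM, "--image", IMAGE, "shell",
             "-c", "/etc/ceph/ceph.conf",
             "-k", "/etc/ceph/ceph.client.admin.keyring",
             "--fsid", FSID, "--", *cmd],
            check=True, capture_output=True, text=True)
        return out.stdout

    def deploy_osd(host: str, device: str) -> None:
        # Wipe any previous LVM state on the device, then ask the
        # orchestrator to create an OSD on it (as the log does for
        # vm03:/dev/vdc and vm03:/dev/vdb).
        subprocess.run(
            ["sudo", CEPHADM, "--image", IMAGE, "ceph-volume",
             "-c", "/etc/ceph/ceph.conf",
             "-k", "/etc/ceph/ceph.client.admin.keyring",
             "--fsid", FSID, "--", "lvm", "zap", device],
            check=True)
        cephadm_shell("ceph", "orch", "daemon", "add", "osd",
                      f"{host}:{device}")

    def wait_for_osds(expected: int, interval: float = 1.0) -> None:
        # Mirrors the repeated `ceph osd stat -f json` polls in the log;
        # the loop keys on num_up_osds in the returned JSON.
        while True:
            stat = json.loads(cephadm_shell("ceph", "osd", "stat",
                                            "-f", "json"))
            if stat["num_up_osds"] >= expected:
                return
            time.sleep(interval)

    deploy_osd("vm03", "/dev/vdb")
    wait_for_osds(8)

The wait condition corresponds to the final poll result below, {"epoch":44,"num_osds":8,"num_up_osds":8,...}, at which point the task moves on to `ceph osd dump --format=json`. [end annotation]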
2026-03-10T05:17:52.982 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:52.982 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:52.982 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:52.982 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:52 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:52.984 DEBUG:teuthology.orchestra.run.vm03:osd.6> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.6.service 2026-03-10T05:17:52.986 INFO:tasks.cephadm:Deploying osd.7 on vm03 with /dev/vdb... 2026-03-10T05:17:52.986 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- lvm zap /dev/vdb 2026-03-10T05:17:53.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:52 vm00 ceph-mon[49980]: pgmap v61: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T05:17:53.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:53.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:53.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:53.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:52 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:53.911 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T05:17:53.926 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch daemon add osd vm03:/dev/vdb 2026-03-10T05:17:53.991 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:53.992 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:53 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:53.992 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:53 vm03 ceph-mon[50983]: from='osd.6 [v2:192.168.123.103:6816/1494493820,v1:192.168.123.103:6817/1494493820]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T05:17:53.992 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:53 vm03 ceph-mon[50983]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T05:17:53.992 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 05:17:53 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-6[60586]: 2026-03-10T05:17:53.737+0000 7faf2f7183c0 -1 osd.6 0 log_to_monitors true 2026-03-10T05:17:54.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:54.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:53 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:54.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:53 vm00 ceph-mon[49980]: from='osd.6 [v2:192.168.123.103:6816/1494493820,v1:192.168.123.103:6817/1494493820]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T05:17:54.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:53 vm00 ceph-mon[49980]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: pgmap v62: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail; 11 KiB/s, 0 objects/s recovering 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: Cluster is now healthy 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: osdmap e36: 7 total, 6 up, 7 in 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: from='osd.6 [v2:192.168.123.103:6816/1494493820,v1:192.168.123.103:6817/1494493820]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:54 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:55.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 
05:17:54 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: pgmap v62: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail; 11 KiB/s, 0 objects/s recovering 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: Cluster is now healthy 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: osdmap e36: 7 total, 6 up, 7 in 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='osd.6 [v2:192.168.123.103:6816/1494493820,v1:192.168.123.103:6817/1494493820]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T05:17:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:54 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:55.807 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 05:17:55 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-6[60586]: 2026-03-10T05:17:55.414+0000 7faf2611b700 -1 osd.6 0 waiting for initial osdmap 2026-03-10T05:17:55.807 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 05:17:55 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-6[60586]: 2026-03-10T05:17:55.424+0000 7faf21ab4700 -1 osd.6 37 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='client.24213 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: Detected new or changed devices on vm03 2026-03-10T05:17:56.282 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='client.? 192.168.123.103:0/1279400958' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9390180b-a978-4738-8a38-b08d7675dd9c"}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9390180b-a978-4738-8a38-b08d7675dd9c"}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9390180b-a978-4738-8a38-b08d7675dd9c"}]': finished 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: osdmap e37: 8 total, 6 up, 8 in 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:56.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:55 vm00 ceph-mon[49980]: from='client.? 
192.168.123.103:0/1734929137' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='client.24213 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: Detected new or changed devices on vm03 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1279400958' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9390180b-a978-4738-8a38-b08d7675dd9c"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9390180b-a978-4738-8a38-b08d7675dd9c"}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9390180b-a978-4738-8a38-b08d7675dd9c"}]': finished 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: osdmap e37: 8 total, 6 up, 8 in 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:55 vm03 ceph-mon[50983]: from='client.? 192.168.123.103:0/1734929137' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: purged_snaps scrub starts 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: purged_snaps scrub ok 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: pgmap v64: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail; 9.3 KiB/s, 0 objects/s recovering 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: osd.6 [v2:192.168.123.103:6816/1494493820,v1:192.168.123.103:6817/1494493820] boot 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:57.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:56 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 ceph-mon[50983]: purged_snaps scrub starts 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 ceph-mon[50983]: purged_snaps scrub ok 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 ceph-mon[50983]: pgmap v64: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail; 9.3 KiB/s, 0 objects/s recovering 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 ceph-mon[50983]: osd.6 [v2:192.168.123.103:6816/1494493820,v1:192.168.123.103:6817/1494493820] boot 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 ceph-mon[50983]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T05:17:57.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:56 vm03 
ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:58.699 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:58 vm03 ceph-mon[50983]: pgmap v67: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 11 KiB/s, 0 objects/s recovering 2026-03-10T05:17:58.699 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:58 vm03 ceph-mon[50983]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T05:17:58.699 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:58 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:58.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:58 vm00 ceph-mon[49980]: pgmap v67: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 11 KiB/s, 0 objects/s recovering 2026-03-10T05:17:58.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:58 vm00 ceph-mon[49980]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T05:17:58.782 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:58 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:59.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:59 vm03 ceph-mon[50983]: osdmap e40: 8 total, 7 up, 8 in 2026-03-10T05:17:59.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:59.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T05:17:59.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:59 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:59.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:17:59 vm03 ceph-mon[50983]: Deploying daemon osd.7 on vm03 2026-03-10T05:17:59.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:59 vm00 ceph-mon[49980]: osdmap e40: 8 total, 7 up, 8 in 2026-03-10T05:17:59.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:17:59.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T05:17:59.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:59 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:17:59.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:17:59 vm00 ceph-mon[49980]: Deploying daemon osd.7 on vm03 2026-03-10T05:18:00.507 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:00 vm03 ceph-mon[50983]: pgmap v70: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T05:18:00.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:00 vm00 ceph-mon[49980]: 
pgmap v70: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T05:18:01.602 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:01.602 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:01.602 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:01.602 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:01 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:01.690 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 7 on host 'vm03' 2026-03-10T05:18:01.761 DEBUG:teuthology.orchestra.run.vm03:osd.7> sudo journalctl -f -n 0 -u ceph-1a50eb6e-1c40-11f1-854f-9d3053100916@osd.7.service 2026-03-10T05:18:01.763 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-10T05:18:01.763 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd stat -f json 2026-03-10T05:18:01.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:01.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:01.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:01.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:01 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:02.227 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:02.274 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":40,"num_osds":8,"num_up_osds":7,"osd_up_since":1773119876,"num_in_osds":8,"osd_in_since":1773119875,"num_remapped_pgs":0} 2026-03-10T05:18:02.308 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 05:18:02 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-7[63407]: 2026-03-10T05:18:02.126+0000 7f4edd2153c0 -1 osd.7 0 log_to_monitors true 2026-03-10T05:18:03.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:02 vm00 ceph-mon[49980]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T05:18:03.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:02 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:03.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:02 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:03.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:02 
vm00 ceph-mon[49980]: from='osd.7 [v2:192.168.123.103:6824/2306571346,v1:192.168.123.103:6825/2306571346]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T05:18:03.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:02 vm00 ceph-mon[49980]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T05:18:03.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:02 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/3071150258' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T05:18:03.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:02 vm03 ceph-mon[50983]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T05:18:03.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:02 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:03.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:02 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:03.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:02 vm03 ceph-mon[50983]: from='osd.7 [v2:192.168.123.103:6824/2306571346,v1:192.168.123.103:6825/2306571346]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T05:18:03.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:02 vm03 ceph-mon[50983]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T05:18:03.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:02 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/3071150258' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T05:18:03.275 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd stat -f json 2026-03-10T05:18:03.756 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:03.814 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":41,"num_osds":8,"num_up_osds":7,"osd_up_since":1773119876,"num_in_osds":8,"osd_in_since":1773119875,"num_remapped_pgs":0} 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: osdmap e41: 8 total, 7 up, 8 in 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='osd.7 [v2:192.168.123.103:6824/2306571346,v1:192.168.123.103:6825/2306571346]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: Detected new or changed devices on vm03 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:04.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:18:04.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:04.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:04.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:04.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:04.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:04.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:03 vm00 ceph-mon[49980]: 
from='client.? 192.168.123.100:0/1344621712' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 05:18:03 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-7[63407]: 2026-03-10T05:18:03.787+0000 7f4ed3c18700 -1 osd.7 0 waiting for initial osdmap 2026-03-10T05:18:04.057 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 05:18:03 vm03 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-osd-7[63407]: 2026-03-10T05:18:03.806+0000 7f4ecddae700 -1 osd.7 42 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: osdmap e41: 8 total, 7 up, 8 in 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='osd.7 [v2:192.168.123.103:6824/2306571346,v1:192.168.123.103:6825/2306571346]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: Detected new or changed devices on vm03 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:04.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:03 vm03 ceph-mon[50983]: 
from='client.? 192.168.123.100:0/1344621712' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T05:18:04.815 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd stat -f json 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: osdmap e42: 8 total, 7 up, 8 in 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: osd.7 [v2:192.168.123.103:6824/2306571346,v1:192.168.123.103:6825/2306571346] boot 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T05:18:05.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:04 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: osdmap e42: 8 total, 7 up, 8 in 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: osd.7 [v2:192.168.123.103:6824/2306571346,v1:192.168.123.103:6825/2306571346] boot 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T05:18:05.066 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:04 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd metadata", 
"id": 7}]: dispatch 2026-03-10T05:18:05.299 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:05.368 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":44,"num_osds":8,"num_up_osds":8,"osd_up_since":1773119884,"num_in_osds":8,"osd_in_since":1773119875,"num_remapped_pgs":0} 2026-03-10T05:18:05.368 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd dump --format=json 2026-03-10T05:18:05.534 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:05.876 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:05.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:05 vm00 ceph-mon[49980]: purged_snaps scrub starts 2026-03-10T05:18:05.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:05 vm00 ceph-mon[49980]: purged_snaps scrub ok 2026-03-10T05:18:05.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:05 vm00 ceph-mon[49980]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T05:18:05.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:05 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:05.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:05 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2600919621' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T05:18:05.876 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":44,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","created":"2026-03-10T05:15:36.915384+0000","modified":"2026-03-10T05:18:05.132999+0000","last_up_change":"2026-03-10T05:18:04.130273+0000","last_in_change":"2026-03-10T05:17:55.407789+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T05:17:23.319005+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min
_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"44f45ef3-82a5-4b34-87a0-62d9f3779646","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":9,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6803","nonce":698063037}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6805","nonce":698063037}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6809","nonce":698063037}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6807","nonce":698063037}]},"public_addr":"192.168.123.100:6803/698063037","cluster_addr":"192.168.123.100:6805/698063037","heartbeat_back_addr":"192.168.123.100:6809/698063037","heartbeat_front_addr":"192.168.123.100:6807/698063037","state":["exists","up"]},{"osd":1,"uuid":"1efabf27-c710-42d6-a867-b95e6b7de593","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":13,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6811","nonce":3501474593}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6813","nonce":3501474593}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6817","nonce":3501474593}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6815","nonce":3501474593}]},"public_addr":"192.168.123.100:6811/3501474593","cluster_addr":"192.168.123.100:6813/3501474593","heartbeat_back_addr":"192.168.123.100:6817/3501474593","heartbeat_front_addr":"192.168.123.100:6815/3501474593","state":["exists","up"]},{"osd":2,"uuid":"c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6819","nonce":3185081230}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6821","nonce":3185081230}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6825","nonce":3185081230}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6823","nonce":3185081230}]},"public_addr":"192.168.123.100:6819/3185081230","cluster_addr":"192.168.123.100:6821/3185081230","heartbeat_back_addr":"192.168.123.100:6825/3185081230","heartbeat_front_addr":"192.168.123.100:6823/3185081230","state":["exists","up"]},{"osd":3,"uuid":"76f35679-51a1-4fac-bf59-17ca29626b1e","up":1,"in":1,"weight":1,"primary_affini
ty":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6827","nonce":2086552335}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6829","nonce":2086552335}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6833","nonce":2086552335}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6831","nonce":2086552335}]},"public_addr":"192.168.123.100:6827/2086552335","cluster_addr":"192.168.123.100:6829/2086552335","heartbeat_back_addr":"192.168.123.100:6833/2086552335","heartbeat_front_addr":"192.168.123.100:6831/2086552335","state":["exists","up"]},{"osd":4,"uuid":"4168843c-8fa2-44f0-b4ba-f52b27d6011b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6801","nonce":93707725}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6803","nonce":93707725}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6807","nonce":93707725}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6805","nonce":93707725}]},"public_addr":"192.168.123.103:6801/93707725","cluster_addr":"192.168.123.103:6803/93707725","heartbeat_back_addr":"192.168.123.103:6807/93707725","heartbeat_front_addr":"192.168.123.103:6805/93707725","state":["exists","up"]},{"osd":5,"uuid":"246fb941-ada4-46ea-8a51-e5bc09a6eb19","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":33,"up_thru":34,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6809","nonce":1363755916}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6811","nonce":1363755916}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6815","nonce":1363755916}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6813","nonce":1363755916}]},"public_addr":"192.168.123.103:6809/1363755916","cluster_addr":"192.168.123.103:6811/1363755916","heartbeat_back_addr":"192.168.123.103:6815/1363755916","heartbeat_front_addr":"192.168.123.103:6813/1363755916","state":["exists","up"]},{"osd":6,"uuid":"2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":38,"up_thru":39,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6817","nonce":1494493820}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6819","non
ce":1494493820}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6823","nonce":1494493820}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6821","nonce":1494493820}]},"public_addr":"192.168.123.103:6817/1494493820","cluster_addr":"192.168.123.103:6819/1494493820","heartbeat_back_addr":"192.168.123.103:6823/1494493820","heartbeat_front_addr":"192.168.123.103:6821/1494493820","state":["exists","up"]},{"osd":7,"uuid":"9390180b-a978-4738-8a38-b08d7675dd9c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":43,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6825","nonce":2306571346}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6827","nonce":2306571346}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6831","nonce":2306571346}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6829","nonce":2306571346}]},"public_addr":"192.168.123.103:6825/2306571346","cluster_addr":"192.168.123.103:6827/2306571346","heartbeat_back_addr":"192.168.123.103:6831/2306571346","heartbeat_front_addr":"192.168.123.103:6829/2306571346","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:01.507497+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:10.925794+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:19.968251+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:30.069802+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:38.916649+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:45.410420+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:54.755294+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:18:03.144150+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:6801/3510305216":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6800/3510305216":"2026-03-11T05:16:35.066840+0000","192.168.123.100:0/3317065629":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6801/562512762":"2026-03-11T05:16
:07.084499+0000","192.168.123.100:0/4021270644":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/2767503399":"2026-03-11T05:16:07.084499+0000","192.168.123.100:6800/3375188738":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/3884712231":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6800/562512762":"2026-03-11T05:16:07.084499+0000","192.168.123.100:6801/3375188738":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/2200184248":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/741308707":"2026-03-11T05:16:35.066840+0000","192.168.123.100:0/2968464388":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/2880433167":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/389225068":"2026-03-11T05:15:53.831055+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T05:18:05.942 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T05:17:23.319005+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '21', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-10T05:18:05.943 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd pool get .mgr pg_num 2026-03-10T05:18:06.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:05 vm03 ceph-mon[50983]: purged_snaps scrub starts 2026-03-10T05:18:06.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:05 vm03 ceph-mon[50983]: purged_snaps scrub ok 2026-03-10T05:18:06.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:05 vm03 ceph-mon[50983]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T05:18:06.057 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:05 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:06.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:05 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2600919621' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T05:18:06.097 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:06.440 INFO:teuthology.orchestra.run.vm00.stdout:pg_num: 1 2026-03-10T05:18:06.511 INFO:tasks.cephadm:Setting up client nodes... 2026-03-10T05:18:06.512 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T05:18:07.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:06 vm00 ceph-mon[49980]: pgmap v76: 1 pgs: 1 active+recovering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:07.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:06 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2093196276' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T05:18:07.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:06 vm00 ceph-mon[49980]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T05:18:07.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:06 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/313091829' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T05:18:07.037 INFO:teuthology.orchestra.run.vm00.stdout:[client.0] 2026-03-10T05:18:07.037 INFO:teuthology.orchestra.run.vm00.stdout: key = AQCPqa9paHzsARAAZNNeLqyE8aegNi/CBlNfxg== 2026-03-10T05:18:07.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:06 vm03 ceph-mon[50983]: pgmap v76: 1 pgs: 1 active+recovering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:07.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:06 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2093196276' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T05:18:07.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:06 vm03 ceph-mon[50983]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T05:18:07.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:06 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/313091829' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T05:18:07.105 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T05:18:07.105 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-10T05:18:07.105 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-10T05:18:07.146 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 
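The run now enters a readiness wait: tasks.cephadm.ceph_manager polls the mgr map until the active daemon reports itself available (the `ceph mgr dump --format=json` call and its JSON output appear just below). A minimal sketch of such a wait loop in Python, assuming a hypothetical run_ceph() helper that wraps the same `cephadm shell` invocation logged above; the fsid and image are the ones from this run, while the helper name, timeout, and poll interval are illustrative, not taken from teuthology itself:

    import json
    import subprocess
    import time

    FSID = "1a50eb6e-1c40-11f1-854f-9d3053100916"   # fsid from this run
    IMAGE = "quay.io/ceph/ceph:v17.2.0"             # bootstrap image from this run

    def run_ceph(*args):
        # Hypothetical wrapper around the cephadm shell command seen in the log.
        cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
               "shell", "--fsid", FSID, "--", *args]
        return subprocess.check_output(cmd, text=True)

    def wait_for_mgr(timeout=300, interval=5):
        # Poll `ceph mgr dump` until the top-level "available" flag is true,
        # mirroring the "waiting for mgr available" step below.
        deadline = time.time() + timeout
        while time.time() < deadline:
            dump = json.loads(run_ceph("ceph", "mgr", "dump", "--format=json"))
            if dump.get("available"):
                return dump["active_name"]
            time.sleep(interval)
        raise TimeoutError(f"no available mgr within {timeout}s")

The pg-clean wait recorded at the end of the previous entry follows the same pattern: poll a JSON status command (for example `ceph pg stat --format=json`) until every PG reports active+clean.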
2026-03-10T05:18:07.146 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-10T05:18:07.146 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph mgr dump --format=json 2026-03-10T05:18:07.359 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:07.753 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:07.852 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":19,"active_gid":14214,"active_name":"vm00.vnepyw","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6800","nonce":979233736},{"type":"v1","addr":"192.168.123.100:6801","nonce":979233736}]},"active_addr":"192.168.123.100:6801/979233736","active_change":"2026-03-10T05:16:35.096700+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":14232,"name":"vm03.vqfmrv","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope 
sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph 
containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container 
image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per 
host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this option can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.100:8443/","prometheus":"http://192.168.123.100:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":5,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":3026295990}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":241172871}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":3315476743}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":3752502749}]}]}} 2026-03-10T05:18:07.853 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-10T05:18:07.853 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-10T05:18:07.853 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd dump --format=json 2026-03-10T05:18:08.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:07 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/4278256912' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T05:18:08.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:07 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/4278256912' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T05:18:08.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:07 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1203416043' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T05:18:08.048 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:08.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:07 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/4278256912' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T05:18:08.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:07 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/4278256912' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T05:18:08.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:07 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/1203416043' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T05:18:08.443 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:08.443 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":45,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","created":"2026-03-10T05:15:36.915384+0000","modified":"2026-03-10T05:18:06.135728+0000","last_up_change":"2026-03-10T05:18:04.130273+0000","last_in_change":"2026-03-10T05:17:55.407789+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T05:17:23.319005+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"44f45ef3-82a5-4b34-87a0-62d9f3779646","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":9,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6803","nonce":698063037}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6805","nonce":698063037}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6809","nonce":698063037}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6807","nonce":698063037}]},"public_addr":"192.168.123.100:6803/698063037","cluster_addr":"192.168.123.100:6805/698063037","heartbeat_back_addr":"192.168.123.100:6809/698063037","heartbeat_front_addr":"192.168.123.100:6807/698063037","state":["exists","up"]}
,{"osd":1,"uuid":"1efabf27-c710-42d6-a867-b95e6b7de593","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":13,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6811","nonce":3501474593}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6813","nonce":3501474593}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6817","nonce":3501474593}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6815","nonce":3501474593}]},"public_addr":"192.168.123.100:6811/3501474593","cluster_addr":"192.168.123.100:6813/3501474593","heartbeat_back_addr":"192.168.123.100:6817/3501474593","heartbeat_front_addr":"192.168.123.100:6815/3501474593","state":["exists","up"]},{"osd":2,"uuid":"c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6819","nonce":3185081230}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6821","nonce":3185081230}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6825","nonce":3185081230}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6823","nonce":3185081230}]},"public_addr":"192.168.123.100:6819/3185081230","cluster_addr":"192.168.123.100:6821/3185081230","heartbeat_back_addr":"192.168.123.100:6825/3185081230","heartbeat_front_addr":"192.168.123.100:6823/3185081230","state":["exists","up"]},{"osd":3,"uuid":"76f35679-51a1-4fac-bf59-17ca29626b1e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6827","nonce":2086552335}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6829","nonce":2086552335}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6833","nonce":2086552335}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6831","nonce":2086552335}]},"public_addr":"192.168.123.100:6827/2086552335","cluster_addr":"192.168.123.100:6829/2086552335","heartbeat_back_addr":"192.168.123.100:6833/2086552335","heartbeat_front_addr":"192.168.123.100:6831/2086552335","state":["exists","up"]},{"osd":4,"uuid":"4168843c-8fa2-44f0-b4ba-f52b27d6011b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6801","nonce":93707725}]},"cluster_addrs":{"addrv
ec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6803","nonce":93707725}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6807","nonce":93707725}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6805","nonce":93707725}]},"public_addr":"192.168.123.103:6801/93707725","cluster_addr":"192.168.123.103:6803/93707725","heartbeat_back_addr":"192.168.123.103:6807/93707725","heartbeat_front_addr":"192.168.123.103:6805/93707725","state":["exists","up"]},{"osd":5,"uuid":"246fb941-ada4-46ea-8a51-e5bc09a6eb19","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":33,"up_thru":34,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6809","nonce":1363755916}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6811","nonce":1363755916}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6815","nonce":1363755916}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6813","nonce":1363755916}]},"public_addr":"192.168.123.103:6809/1363755916","cluster_addr":"192.168.123.103:6811/1363755916","heartbeat_back_addr":"192.168.123.103:6815/1363755916","heartbeat_front_addr":"192.168.123.103:6813/1363755916","state":["exists","up"]},{"osd":6,"uuid":"2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":38,"up_thru":39,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6817","nonce":1494493820}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6819","nonce":1494493820}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6823","nonce":1494493820}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6821","nonce":1494493820}]},"public_addr":"192.168.123.103:6817/1494493820","cluster_addr":"192.168.123.103:6819/1494493820","heartbeat_back_addr":"192.168.123.103:6823/1494493820","heartbeat_front_addr":"192.168.123.103:6821/1494493820","state":["exists","up"]},{"osd":7,"uuid":"9390180b-a978-4738-8a38-b08d7675dd9c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":43,"up_thru":44,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6825","nonce":2306571346}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6827","nonce":2306571346}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6831","nonce":2306571346}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":
2306571346},{"type":"v1","addr":"192.168.123.103:6829","nonce":2306571346}]},"public_addr":"192.168.123.103:6825/2306571346","cluster_addr":"192.168.123.103:6827/2306571346","heartbeat_back_addr":"192.168.123.103:6831/2306571346","heartbeat_front_addr":"192.168.123.103:6829/2306571346","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:01.507497+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:10.925794+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:19.968251+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:30.069802+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:38.916649+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:45.410420+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:54.755294+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:18:03.144150+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:6801/3510305216":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6800/3510305216":"2026-03-11T05:16:35.066840+0000","192.168.123.100:0/3317065629":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6801/562512762":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/4021270644":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/2767503399":"2026-03-11T05:16:07.084499+0000","192.168.123.100:6800/3375188738":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/3884712231":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6800/562512762":"2026-03-11T05:16:07.084499+0000","192.168.123.100:6801/3375188738":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/2200184248":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/741308707":"2026-03-11T05:16:35.066840+0000","192.168.123.100:0/2968464388":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/2880433167":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/389225068":"2026-03-11T05:15:53.831055+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T05:18:08.530 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
2026-03-10T05:18:08.530 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd dump --format=json 2026-03-10T05:18:08.726 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:08 vm00 ceph-mon[49980]: pgmap v79: 1 pgs: 1 active+recovering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:09.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:08 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1006704768' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T05:18:09.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:08 vm03 ceph-mon[50983]: pgmap v79: 1 pgs: 1 active+recovering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:09.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:08 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1006704768' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T05:18:09.101 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:09.101 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":45,"fsid":"1a50eb6e-1c40-11f1-854f-9d3053100916","created":"2026-03-10T05:15:36.915384+0000","modified":"2026-03-10T05:18:06.135728+0000","last_up_change":"2026-03-10T05:18:04.130273+0000","last_in_change":"2026-03-10T05:17:55.407789+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T05:17:23.319005+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":
1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"44f45ef3-82a5-4b34-87a0-62d9f3779646","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":9,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6803","nonce":698063037}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6805","nonce":698063037}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6809","nonce":698063037}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":698063037},{"type":"v1","addr":"192.168.123.100:6807","nonce":698063037}]},"public_addr":"192.168.123.100:6803/698063037","cluster_addr":"192.168.123.100:6805/698063037","heartbeat_back_addr":"192.168.123.100:6809/698063037","heartbeat_front_addr":"192.168.123.100:6807/698063037","state":["exists","up"]},{"osd":1,"uuid":"1efabf27-c710-42d6-a867-b95e6b7de593","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":13,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6811","nonce":3501474593}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6813","nonce":3501474593}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6817","nonce":3501474593}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3501474593},{"type":"v1","addr":"192.168.123.100:6815","nonce":3501474593}]},"public_addr":"192.168.123.100:6811/3501474593","cluster_addr":"192.168.123.100:6813/3501474593","heartbeat_back_addr":"192.168.123.100:6817/3501474593","heartbeat_front_addr":"192.168.123.100:6815/3501474593","state":["exists","up"]},{"osd":2,"uuid":"c055bfa2-ecbd-4aee-95cc-b2b7071e6a8b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6819","nonce":3185081230}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6821","nonce":3185081230}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6825","nonce":3185081230}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":3185081230},{"type":"v1","addr":"192.168.123.100:6823","nonce":3185081230}]},"public_addr":"192.168.123.100:6819/3185081230","cluster_addr":"192.168.123.100:6821/3185081230","heartbeat_back_addr":"192.168.123.100:6825/3185081230","heartbeat_front_addr":"192.168.123.100:6823/3185081230","state":["exists","up"]},{"osd":3,"uuid":"76f35679-51a1-4fac-bf59-17ca29626b1e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6827","nonce"
:2086552335}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6829","nonce":2086552335}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6833","nonce":2086552335}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":2086552335},{"type":"v1","addr":"192.168.123.100:6831","nonce":2086552335}]},"public_addr":"192.168.123.100:6827/2086552335","cluster_addr":"192.168.123.100:6829/2086552335","heartbeat_back_addr":"192.168.123.100:6833/2086552335","heartbeat_front_addr":"192.168.123.100:6831/2086552335","state":["exists","up"]},{"osd":4,"uuid":"4168843c-8fa2-44f0-b4ba-f52b27d6011b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6801","nonce":93707725}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6803","nonce":93707725}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6807","nonce":93707725}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":93707725},{"type":"v1","addr":"192.168.123.103:6805","nonce":93707725}]},"public_addr":"192.168.123.103:6801/93707725","cluster_addr":"192.168.123.103:6803/93707725","heartbeat_back_addr":"192.168.123.103:6807/93707725","heartbeat_front_addr":"192.168.123.103:6805/93707725","state":["exists","up"]},{"osd":5,"uuid":"246fb941-ada4-46ea-8a51-e5bc09a6eb19","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":33,"up_thru":34,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6809","nonce":1363755916}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6811","nonce":1363755916}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6815","nonce":1363755916}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":1363755916},{"type":"v1","addr":"192.168.123.103:6813","nonce":1363755916}]},"public_addr":"192.168.123.103:6809/1363755916","cluster_addr":"192.168.123.103:6811/1363755916","heartbeat_back_addr":"192.168.123.103:6815/1363755916","heartbeat_front_addr":"192.168.123.103:6813/1363755916","state":["exists","up"]},{"osd":6,"uuid":"2768cbf8-7fe3-4a77-bfb6-a35e6b5999fd","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":38,"up_thru":39,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6817","nonce":1494493820}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6819","nonce":1494493820}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6823","nonce":1494493820}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr
":"192.168.123.103:6820","nonce":1494493820},{"type":"v1","addr":"192.168.123.103:6821","nonce":1494493820}]},"public_addr":"192.168.123.103:6817/1494493820","cluster_addr":"192.168.123.103:6819/1494493820","heartbeat_back_addr":"192.168.123.103:6823/1494493820","heartbeat_front_addr":"192.168.123.103:6821/1494493820","state":["exists","up"]},{"osd":7,"uuid":"9390180b-a978-4738-8a38-b08d7675dd9c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":43,"up_thru":44,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6825","nonce":2306571346}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6827","nonce":2306571346}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6831","nonce":2306571346}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":2306571346},{"type":"v1","addr":"192.168.123.103:6829","nonce":2306571346}]},"public_addr":"192.168.123.103:6825/2306571346","cluster_addr":"192.168.123.103:6827/2306571346","heartbeat_back_addr":"192.168.123.103:6831/2306571346","heartbeat_front_addr":"192.168.123.103:6829/2306571346","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:01.507497+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:10.925794+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:19.968251+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:30.069802+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:38.916649+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:45.410420+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:17:54.755294+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T05:18:03.144150+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:6801/3510305216":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6800/3510305216":"2026-03-11T05:16:35.066840+0000","192.168.123.100:0/3317065629":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6801/562512762":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/4021270644":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/2767503399":"2026-03-11T05:16:07.084499+0000","192.168.123.100:6800/3375188738":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/
3884712231":"2026-03-11T05:16:35.066840+0000","192.168.123.100:6800/562512762":"2026-03-11T05:16:07.084499+0000","192.168.123.100:6801/3375188738":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/2200184248":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/741308707":"2026-03-11T05:16:35.066840+0000","192.168.123.100:0/2968464388":"2026-03-11T05:16:07.084499+0000","192.168.123.100:0/2880433167":"2026-03-11T05:15:53.831055+0000","192.168.123.100:0/389225068":"2026-03-11T05:15:53.831055+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T05:18:09.149 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.0 flush_pg_stats 2026-03-10T05:18:09.149 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.1 flush_pg_stats 2026-03-10T05:18:09.149 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.2 flush_pg_stats 2026-03-10T05:18:09.149 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.3 flush_pg_stats 2026-03-10T05:18:09.149 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.4 flush_pg_stats 2026-03-10T05:18:09.149 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.5 flush_pg_stats 2026-03-10T05:18:09.150 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.6 flush_pg_stats 2026-03-10T05:18:09.150 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph tell osd.7 flush_pg_stats 2026-03-10T05:18:09.826 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.859 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.871 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.909 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.915 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.932 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:09.990 
INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:10.013 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:10.069 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:09 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/811948691' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T05:18:10.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:09 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/811948691' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T05:18:10.890 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:10 vm00 ceph-mon[49980]: pgmap v80: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:11.024 INFO:teuthology.orchestra.run.vm00.stdout:38654705679 2026-03-10T05:18:11.024 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.0 2026-03-10T05:18:11.032 INFO:teuthology.orchestra.run.vm00.stdout:73014444043 2026-03-10T05:18:11.032 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.2 2026-03-10T05:18:11.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:10 vm03 ceph-mon[50983]: pgmap v80: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:11.432 INFO:teuthology.orchestra.run.vm00.stdout:120259084296 2026-03-10T05:18:11.432 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.4 2026-03-10T05:18:11.632 INFO:teuthology.orchestra.run.vm00.stdout:141733920774 2026-03-10T05:18:11.632 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.5 2026-03-10T05:18:11.688 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:11.688 INFO:teuthology.orchestra.run.vm00.stdout:163208757252 2026-03-10T05:18:11.688 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.6 2026-03-10T05:18:11.694 INFO:teuthology.orchestra.run.vm00.stdout:103079215114 2026-03-10T05:18:11.694 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.3 2026-03-10T05:18:11.700 INFO:teuthology.orchestra.run.vm00.stdout:184683593732 2026-03-10T05:18:11.700 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.7 2026-03-10T05:18:11.706 INFO:teuthology.orchestra.run.vm00.stdout:55834574861 2026-03-10T05:18:11.706 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 
1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.1 2026-03-10T05:18:11.834 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:12.326 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:12.371 INFO:teuthology.orchestra.run.vm00.stdout:38654705679 2026-03-10T05:18:12.441 INFO:tasks.cephadm.ceph_manager.ceph:need seq 38654705679 got 38654705679 for osd.0 2026-03-10T05:18:12.441 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:12.592 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:12.788 INFO:teuthology.orchestra.run.vm00.stdout:73014444043 2026-03-10T05:18:12.806 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:12.831 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:12.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:12 vm00 ceph-mon[49980]: pgmap v81: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:12.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:12 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1053819157' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T05:18:12.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:12 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/4065780732' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T05:18:12.932 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:12.942 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:13.007 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444043 got 73014444043 for osd.2 2026-03-10T05:18:13.008 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:12 vm03 ceph-mon[50983]: pgmap v81: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:12 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1053819157' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T05:18:13.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:12 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/4065780732' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T05:18:13.471 INFO:teuthology.orchestra.run.vm00.stdout:120259084295 2026-03-10T05:18:13.691 INFO:tasks.cephadm.ceph_manager.ceph:need seq 120259084296 got 120259084295 for osd.4 2026-03-10T05:18:13.814 INFO:teuthology.orchestra.run.vm00.stdout:184683593731 2026-03-10T05:18:13.956 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:13 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2944018830' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T05:18:13.956 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:13 vm00 ceph-mon[49980]: from='client.? 
192.168.123.100:0/264377427' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T05:18:13.983 INFO:tasks.cephadm.ceph_manager.ceph:need seq 184683593732 got 184683593731 for osd.7 2026-03-10T05:18:14.013 INFO:teuthology.orchestra.run.vm00.stdout:141733920775 2026-03-10T05:18:14.076 INFO:teuthology.orchestra.run.vm00.stdout:163208757253 2026-03-10T05:18:14.104 INFO:tasks.cephadm.ceph_manager.ceph:need seq 141733920774 got 141733920775 for osd.5 2026-03-10T05:18:14.104 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:14.163 INFO:tasks.cephadm.ceph_manager.ceph:need seq 163208757252 got 163208757253 for osd.6 2026-03-10T05:18:14.163 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:14.171 INFO:teuthology.orchestra.run.vm00.stdout:103079215114 2026-03-10T05:18:14.181 INFO:teuthology.orchestra.run.vm00.stdout:55834574861 2026-03-10T05:18:14.272 INFO:tasks.cephadm.ceph_manager.ceph:need seq 55834574861 got 55834574861 for osd.1 2026-03-10T05:18:14.273 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:14.285 INFO:tasks.cephadm.ceph_manager.ceph:need seq 103079215114 got 103079215114 for osd.3 2026-03-10T05:18:14.285 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:14.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:13 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2944018830' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T05:18:14.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:13 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/264377427' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T05:18:14.692 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.4 2026-03-10T05:18:14.858 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:14.979 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:14 vm00 ceph-mon[49980]: pgmap v82: 1 pgs: 1 active+recovering; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 40 KiB/s, 0 objects/s recovering 2026-03-10T05:18:14.979 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:14 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/1728068898' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T05:18:14.979 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:14 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2734240223' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T05:18:14.979 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:14 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/4162683812' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T05:18:14.979 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:14 vm00 ceph-mon[49980]: from='client.? 
192.168.123.100:0/3087885753' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T05:18:14.984 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph osd last-stat-seq osd.7 2026-03-10T05:18:15.176 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:15.213 INFO:teuthology.orchestra.run.vm00.stdout:120259084296 2026-03-10T05:18:15.300 INFO:tasks.cephadm.ceph_manager.ceph:need seq 120259084296 got 120259084296 for osd.4 2026-03-10T05:18:15.300 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:14 vm03 ceph-mon[50983]: pgmap v82: 1 pgs: 1 active+recovering; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 40 KiB/s, 0 objects/s recovering 2026-03-10T05:18:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:14 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/1728068898' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T05:18:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:14 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/2734240223' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T05:18:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:14 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/4162683812' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T05:18:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:14 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/3087885753' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T05:18:15.536 INFO:teuthology.orchestra.run.vm00.stdout:184683593732 2026-03-10T05:18:15.607 INFO:tasks.cephadm.ceph_manager.ceph:need seq 184683593732 got 184683593732 for osd.7 2026-03-10T05:18:15.607 DEBUG:teuthology.parallel:result is None 2026-03-10T05:18:15.607 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-10T05:18:15.607 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph pg dump --format=json 2026-03-10T05:18:15.769 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:16.116 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:15 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2190525075' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T05:18:16.116 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:15 vm00 ceph-mon[49980]: from='client.? 
192.168.123.100:0/824601512' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T05:18:16.117 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:16.120 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-10T05:18:16.193 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":83,"stamp":"2026-03-10T05:18:15.127120+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":7,"num_osds":8,"num_per_pool_osds":7,"num_per_pool_omap_osds":7,"kb":167739392,"kb_used":49284,"kb_used_data":5060,"kb_used_omap":0,"kb_used_meta":44160,"kb_avail":167690108,"statfs":{"total":171765137408,"available":171714670592,"internally_reserved":0,"allocated":5181440,"data_stored":3047584,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45219840},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":1,"num_bytes_recovered":327680,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0
,"acting":0,"num_store_stats":0,"stamp_delta":"9.990772"},"pg_stats":[{"pgid":"1.0","version":"21'76","reported_seq":16,"reported_epoch":45,"state":"active+clean","last_fresh":"2026-03-10T05:18:12.212431+0000","last_change":"2026-03-10T05:18:12.212431+0000","last_active":"2026-03-10T05:18:12.212431+0000","last_peered":"2026-03-10T05:18:12.212431+0000","last_clean":"2026-03-10T05:18:12.212431+0000","last_became_active":"2026-03-10T05:18:06.146398+0000","last_became_peered":"2026-03-10T05:18:06.146398+0000","last_unstale":"2026-03-10T05:18:12.212431+0000","last_undegraded":"2026-03-10T05:18:12.212431+0000","last_fullsized":"2026-03-10T05:18:12.212431+0000","mapping_epoch":44,"log_start":"0'0","ondisk_log_start":"0'0","created":19,"last_epoch_clean":45,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T05:17:23.974226+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T05:17:23.974226+0000","last_clean_scrub_stamp":"2026-03-10T05:17:23.974226+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T17:09:20.709315+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1605632,"data_stored":1591360,"data_comp
ressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"up_from":43,"seq":184683593733,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6084,"kb_used_data":828,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961340,"statfs":{"total":21470642176,"available":21464412160,"internally_reserved":0,"allocated":847872,"data_stored":579802,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.96799999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56999999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72299999999999998}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0149999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97899999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.044}]}]},{"osd":6,"up_from":38,"seq":163208757253,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6084,"kb_used_data":828,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961340,"statfs":{"total":21470642176,"available":21464412160,"internally_reserved":0,"allocated":847872,"data_stored":579802,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83199999999999996}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46400000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85199999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.44800000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88700000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84299999999999997}]}]},{"osd":1,"up_from":13,"seq":55834574862,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6332,"kb_used_data":436,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20961092,"statfs":{"total":21470642176,"available":21464158208,"internally_reserved":0,"allocated":446464,"data_stored":182033,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 05:18:12 2026","interfaces":[{"interface":"back","average":{"1min":0.5,"5min":0.5,"15min":0.5},"min":{"1min":0.20799999999999999,"5min":0.20799999999999999,"15min":0.20799999999999999},"max":{"1min":1.0600000000000001,"5min":1.0600000000000001,"15min":1.0600000000000001},"last":1.0600000000000001},{"interface":"front","average":{"1min":0.50600000000000001,"5min":0.50600000000000001,"15min":0.50600000000000001},"min":{"1min":0.187,"5min":0.187,"15min":0.187},"max":{"1min":0.876,"5min":0.876,"15min":0.876},"last":0.83399999999999996}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.95199999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94099999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86299999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.96099999999999997}]},{"osd":6,"last 
update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97099999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0960000000000001}]}]},{"osd":0,"up_from":9,"seq":38654705680,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6728,"kb_used_data":832,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960696,"statfs":{"total":21470642176,"available":21463752704,"internally_reserved":0,"allocated":851968,"data_stored":580117,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59499999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53200000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54400000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51700000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.502}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66300000000000003}]}]},{"osd":2,"up_from":17,"seq":73014444044,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6268,"kb_used_data":436,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961156,"statfs":{"total":21470642176,"available":21464223744,"internally_reserved":0,"allocated":446464,"data_stored":181962,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.38200000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72899999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71999999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70699999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74399999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80100000000000005}]}]},{"osd":3,"up_from":24,"seq":103079215114,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5824,"kb_used_data":440,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961600,"statfs":{"total":21470642176,"available":21464678400,"internally_reserved":0,"allocated":450560,"data_stored":182277,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71999999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64200000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68600000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.621}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78400000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83999999999999997}]}]},{"osd":4,"up_from":28,"seq":120259084296,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":5816,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961608,"statfs":{"total":21470642176,"available":21464686592,"internally_reserved":0,"allocated":442368,"data_stored":181789,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.745}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76800000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73399999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61499999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79400000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48399999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.95699999999999996}]}]},{"osd":5,"up_from":33,"seq":141733920775,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6148,"kb_used_data":828,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961276,"statfs":{"total":21470642176,"available":21464346624,"internally_reserved":0,"allocated":847872,"data_stored":579802,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.505}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89000000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52000000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56699999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48099999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91300000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0249999999999999}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T05:18:16.193 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph pg dump --format=json 2026-03-10T05:18:16.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:15 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/2190525075' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T05:18:16.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:15 vm03 ceph-mon[50983]: from='client.? 192.168.123.100:0/824601512' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T05:18:16.356 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:16.708 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:16.713 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-10T05:18:16.762 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":83,"stamp":"2026-03-10T05:18:15.127120+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":7,"num_osds":8,"num_per_pool_osds":7,"num_per_pool_omap_osds":7,"kb":167739392,"kb_used":49284,"kb_used_data":5060,"kb_used_omap":0,"kb_used_meta":44160,"kb_avail":167690108,"statfs":{"total":171765137408,"available":171714670592,"internally_reserved":0,"allocated":5181440,"data_stored":3047584,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45219840},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":1,"num_bytes_recovered":327680,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_sn
apsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"9.990772"},"pg_stats":[{"pgid":"1.0","version":"21'76","reported_seq":16,"reported_epoch":45,"state":"active+clean","last_fresh":"2026-03-10T05:18:12.212431+0000","last_change":"2026-03-10T05:18:12.212431+0000","last_active":"2026-03-10T05:18:12.212431+0000","last_peered":"2026-03-10T05:18:12.212431+0000","last_clean":"2026-03-10T05:18:12.212431+0000","last_became_active":"2026-03-10T05:18:06.146398+0000","last_became_peered":"2026-03-10T05:18:06.146398+0000","last_unstale":"2026-03-10T05:18:12.212431+0000","last_undegraded":"2026-03-10T05:18:12.212431+0000","last_fullsized":"2026-03-10T05:18:12.212431+0000","mapping_epoch":44,"log_start":"0'0","ondisk_log_start":"0'0","created":19,"last_epoch_clean":45,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T05:17:23.974226+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T05:17:23.974226+0000","last_clean_scrub_stamp":"2026-03-10T05:17:23.974226+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T17:09:20.709315+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promo
te":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1605632,"data_stored":1591360,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"up_from":43,"seq":184683593733,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6084,"kb_used_data":828,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961340,"statfs":{"total":21470642176,"available":21464412160,"internally_reserved":0,"allocated":847872,"data_stored":579802,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.96799999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56999999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72299999999999998}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0149999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97899999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.044}]}]},{"osd":6,"up_from":38,"seq":163208757253,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6084,"kb_used_data":828,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961340,"statfs":{"total":21470642176,"available":21464412160,"internally_reserved":0,"allocated":847872,"data_stored":579802,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83199999999999996}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46400000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85199999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.44800000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88700000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84299999999999997}]}]},{"osd":1,"up_from":13,"seq":55834574862,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6332,"kb_used_data":436,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20961092,"statfs":{"total":21470642176,"available":21464158208,"internally_reserved":0,"allocated":446464,"data_stored":182033,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 05:18:12 
2026","interfaces":[{"interface":"back","average":{"1min":0.5,"5min":0.5,"15min":0.5},"min":{"1min":0.20799999999999999,"5min":0.20799999999999999,"15min":0.20799999999999999},"max":{"1min":1.0600000000000001,"5min":1.0600000000000001,"15min":1.0600000000000001},"last":1.0600000000000001},{"interface":"front","average":{"1min":0.50600000000000001,"5min":0.50600000000000001,"15min":0.50600000000000001},"min":{"1min":0.187,"5min":0.187,"15min":0.187},"max":{"1min":0.876,"5min":0.876,"15min":0.876},"last":0.83399999999999996}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.95199999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94099999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86299999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.96099999999999997}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97099999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0960000000000001}]}]},{"osd":0,"up_from":9,"seq":38654705680,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6728,"kb_used_data":832,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960696,"statfs":{"total":21470642176,"available":21463752704,"internally_reserved":0,"allocated":851968,"data_stored":580117,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59499999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53200000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54400000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51700000000000002}]},{"osd":5,"last 
update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.502}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66300000000000003}]}]},{"osd":2,"up_from":17,"seq":73014444044,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6268,"kb_used_data":436,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961156,"statfs":{"total":21470642176,"available":21464223744,"internally_reserved":0,"allocated":446464,"data_stored":181962,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.38200000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72899999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71999999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70699999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74399999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80100000000000005}]}]},{"osd":3,"up_from":24,"seq":103079215114,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5824,"kb_used_data":440,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961600,"statfs":{"total":21470642176,"available":21464678400,"internally_reserved":0,"allocated":450560,"data_stored":182277,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71999999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64200000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68600000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.621}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78400000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83999999999999997}]}]},{"osd":4,"up_from":28,"seq":120259084296,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":5816,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961608,"statfs":{"total":21470642176,"available":21464686592,"internally_reserved":0,"allocated":442368,"data_stored":181789,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.745}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76800000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73399999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61499999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79400000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48399999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.95699999999999996}]}]},{"osd":5,"up_from":33,"seq":141733920775,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6148,"kb_used_data":828,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961276,"statfs":{"total":21470642176,"available":21464346624,"internally_reserved":0,"allocated":847872,"data_stored":579802,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.505}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89000000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52000000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56699999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48099999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91300000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0249999999999999}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T05:18:16.763 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-10T05:18:16.763 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-10T05:18:16.763 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T05:18:16.763 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph health --format=json 2026-03-10T05:18:16.937 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/1a50eb6e-1c40-11f1-854f-9d3053100916/mon.vm00/config 2026-03-10T05:18:16.966 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:16 vm00 ceph-mon[49980]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-10T05:18:17.305 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:17.305 INFO:teuthology.orchestra.run.vm00.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-10T05:18:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:16 vm03 ceph-mon[50983]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-10T05:18:17.388 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-10T05:18:17.388 INFO:tasks.cephadm:Setup complete, yielding 2026-03-10T05:18:17.388 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-10T05:18:17.391 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-03-10T05:18:17.391 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph orch status' 2026-03-10T05:18:17.874 INFO:teuthology.orchestra.run.vm00.stdout:Backend: cephadm 2026-03-10T05:18:17.874 INFO:teuthology.orchestra.run.vm00.stdout:Available: Yes 2026-03-10T05:18:17.874 INFO:teuthology.orchestra.run.vm00.stdout:Paused: No 2026-03-10T05:18:17.941 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph orch ps' 2026-03-10T05:18:18.103 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:17 vm00 ceph-mon[49980]: from='client.14472 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:18.103 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:17 vm00 ceph-mon[49980]: from='client.14476 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:18.103 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:17 vm00 ceph-mon[49980]: from='client.? 192.168.123.100:0/2546049641' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T05:18:18.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:17 vm03 ceph-mon[50983]: from='client.14472 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:18.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:17 vm03 ceph-mon[50983]: from='client.14476 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:18.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:17 vm03 ceph-mon[50983]: from='client.? 
192.168.123.100:0/2546049641' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.vm00 vm00 *:9093,9094 running (79s) 41s ago 2m 16.5M - ba2b418f427c 842dc5b41d13 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm00 vm00 running (2m) 41s ago 2m 6953k - 17.2.0 e1d6a67b021e 5841b872ffb8 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm03 vm03 running (98s) 16s ago 98s 7096k - 17.2.0 e1d6a67b021e 74364f2f63ba 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:grafana.vm00 vm00 *:3000 running (77s) 41s ago 111s 42.9M - 8.3.5 dad864ee21e9 496f432f6b55 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm00.vnepyw vm00 *:9283 running (2m) 41s ago 2m 459M - 17.2.0 e1d6a67b021e 7b451301580f 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm03.vqfmrv vm03 *:8443,9283 running (98s) 16s ago 98s 421M - 17.2.0 e1d6a67b021e b282924f4978 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm00 vm00 running (2m) 41s ago 2m 42.9M 2048M 17.2.0 e1d6a67b021e d3f5c725c145 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm03 vm03 running (96s) 16s ago 96s 41.0M 2048M 17.2.0 e1d6a67b021e b7b06fb0bf03 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm00 vm00 *:9100 running (108s) 41s ago 108s 17.4M - 1dbe0e931976 7779c8b96e95 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm03 vm03 *:9100 running (93s) 16s ago 93s 17.8M - 1dbe0e931976 a1bb01f97194 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (79s) 41s ago 79s 33.6M 4096M 17.2.0 e1d6a67b021e 613452d40395 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (70s) 41s ago 70s 35.9M 4096M 17.2.0 e1d6a67b021e 918a788b64b2 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (61s) 41s ago 61s 32.5M 4096M 17.2.0 e1d6a67b021e c3ed423ed05c 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (51s) 41s ago 51s 27.6M 4096M 17.2.0 e1d6a67b021e ad62f16a3575 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (43s) 16s ago 43s 34.1M 4096M 17.2.0 e1d6a67b021e b0fc5648abee 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (34s) 16s ago 34s 36.1M 4096M 17.2.0 e1d6a67b021e 6ee2cffc97f0 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (26s) 16s ago 26s 30.3M 4096M 17.2.0 e1d6a67b021e 739f1b578c30 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (18s) 16s ago 17s 15.7M 4096M 17.2.0 e1d6a67b021e b6e47307a3c7 2026-03-10T05:18:18.428 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.vm00 vm00 *:9095 running (87s) 41s ago 87s 34.3M - 514e6a882f6e ae8d8e6f8c48 2026-03-10T05:18:18.474 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph orch ls' 2026-03-10T05:18:18.955 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:18 vm00 ceph-mon[49980]: pgmap v84: 1 pgs: 1 active+clean; 
449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s, 0 objects/s recovering 2026-03-10T05:18:18.955 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:18 vm00 ceph-mon[49980]: from='client.14484 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager ?:9093,9094 1/1 42s ago 2m count:1 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:crash 2/2 42s ago 2m * 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:grafana ?:3000 1/1 42s ago 2m count:1 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:mgr 2/2 42s ago 2m count:2 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:mon 2/2 42s ago 2m vm00:192.168.123.100=vm00;vm03:192.168.123.103=vm03;count:2 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter ?:9100 2/2 42s ago 2m * 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:osd 8 42s ago - 2026-03-10T05:18:18.956 INFO:teuthology.orchestra.run.vm00.stdout:prometheus ?:9095 1/1 42s ago 2m count:1 2026-03-10T05:18:19.020 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph orch host ls' 2026-03-10T05:18:19.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:18 vm03 ceph-mon[50983]: pgmap v84: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s, 0 objects/s recovering 2026-03-10T05:18:19.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:18 vm03 ceph-mon[50983]: from='client.14484 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:19.516 INFO:teuthology.orchestra.run.vm00.stdout:HOST ADDR LABELS STATUS 2026-03-10T05:18:19.517 INFO:teuthology.orchestra.run.vm00.stdout:vm00 192.168.123.100 2026-03-10T05:18:19.517 INFO:teuthology.orchestra.run.vm00.stdout:vm03 192.168.123.103 2026-03-10T05:18:19.517 INFO:teuthology.orchestra.run.vm00.stdout:2 hosts in cluster 2026-03-10T05:18:19.591 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph orch device ls' 2026-03-10T05:18:20.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:19 vm00 ceph-mon[49980]: from='client.14488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:20.032 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:19 vm00 ceph-mon[49980]: from='client.14492 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REJECT REASONS 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdb hdd DWNBRSTVMM00001 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdc hdd DWNBRSTVMM00002 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdd hdd 
DWNBRSTVMM00003 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vde hdd DWNBRSTVMM00004 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.069 INFO:teuthology.orchestra.run.vm00.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-10T05:18:20.123 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-10T05:18:20.126 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm00.local 2026-03-10T05:18:20.126 DEBUG:teuthology.orchestra.run.vm00:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-10T05:18:20.155 INFO:teuthology.orchestra.run.vm00.stderr:+ systemctl stop nfs-server 2026-03-10T05:18:20.162 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm03.local 2026-03-10T05:18:20.162 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-10T05:18:20.187 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:19 vm03 ceph-mon[50983]: from='client.14488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:20.187 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:19 vm03 ceph-mon[50983]: from='client.14492 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:20.190 INFO:teuthology.orchestra.run.vm03.stderr:+ systemctl stop nfs-server 2026-03-10T05:18:20.196 INFO:teuthology.run_tasks:Running task cephadm.shell... 
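[annotation] The vip.exec task just above runs each configured command on every matched host, wrapping it as `sudo TESTDIR=... bash -ex -c '<cmd>'` exactly as the two run lines show. A rough sketch of that pattern, assuming plain ssh as a stand-in for teuthology's remote-execution layer (run_on_host is a hypothetical helper, not a teuthology API):

    # Sketch only: mirrors the command wrapping seen in the vip.exec lines.
    import shlex
    import subprocess

    def run_on_host(host, command, testdir="/home/ubuntu/cephtest"):
        # Wrap the command the way the log shows:
        #   sudo TESTDIR=<testdir> bash -ex -c '<command>'
        wrapped = f"sudo TESTDIR={testdir} bash -ex -c {shlex.quote(command)}"
        subprocess.check_call(["ssh", host, wrapped])

    for host in ("ubuntu@vm00.local", "ubuntu@vm03.local"):
        run_on_host(host, "systemctl stop nfs-server")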
2026-03-10T05:18:20.198 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-03-10T05:18:20.198 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph fs volume create foofs' 2026-03-10T05:18:21.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:20 vm00 ceph-mon[49980]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s, 0 objects/s recovering 2026-03-10T05:18:21.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:20 vm00 ceph-mon[49980]: from='client.14496 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:21.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:20 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-10T05:18:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:20 vm03 ceph-mon[50983]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s, 0 objects/s recovering 2026-03-10T05:18:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:20 vm03 ceph-mon[50983]: from='client.14496 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:21.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:20 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-10T05:18:22.049 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T05:18:22.051 INFO:tasks.cephadm:Waiting for ceph service mds.foofs to start (timeout 300)... 
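[annotation] 'ceph fs volume create foofs' is a convenience wrapper; the surrounding mon entries show the mgr expanding it into pool creation, 'fs new', and a saved mds service spec with placement count:2. A hand-rolled equivalent would be roughly this (sketch, inferred from the commands dispatched in the log):

    ceph osd pool create cephfs.foofs.meta
    ceph osd pool create cephfs.foofs.data
    ceph fs new foofs cephfs.foofs.meta cephfs.foofs.data
    ceph orch apply mds foofs --placement=2    # cephadm then schedules the daemons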
2026-03-10T05:18:22.051 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch ls -f json 2026-03-10T05:18:22.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:21 vm00 ceph-mon[49980]: from='client.24309 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:22.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:21 vm00 ceph-mon[49980]: from='client.14504 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:22.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:21 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-10T05:18:22.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:21 vm00 ceph-mon[49980]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T05:18:22.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:21 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-10T05:18:22.246 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:21 vm00 ceph-1a50eb6e-1c40-11f1-854f-9d3053100916-mon-vm00[49976]: 2026-03-10T05:18:21.960+0000 7f802fa94700 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-10T05:18:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:21 vm03 ceph-mon[50983]: from='client.24309 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:21 vm03 ceph-mon[50983]: from='client.14504 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:21 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-10T05:18:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:21 vm03 ceph-mon[50983]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T05:18:22.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:21 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-10T05:18:22.636 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:22.636 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T05:16:00.303724Z", "last_refresh": "2026-03-10T05:17:36.559959Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T05:15:58.571684Z", "last_refresh": "2026-03-10T05:17:36.559987Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "status": {"created": "2026-03-10T05:15:59.361562Z", "last_refresh": "2026-03-10T05:17:36.560014Z", "ports": [3000], "running": 1, 
"size": 1}}, {"events": ["2026-03-10T05:18:21.996028Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T05:18:21.992008Z", "running": 0, "size": 2}}, {"placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T05:15:58.255598Z", "last_refresh": "2026-03-10T05:17:36.559921Z", "running": 2, "size": 2}}, {"placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm03:192.168.123.103=vm03"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T05:16:17.303966Z", "last_refresh": "2026-03-10T05:17:36.559813Z", "running": 2, "size": 2}}, {"placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T05:15:59.847259Z", "last_refresh": "2026-03-10T05:17:36.560042Z", "ports": [9100], "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", "container_image_name": "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", "last_refresh": "2026-03-10T05:17:36.560094Z", "running": 8, "size": 8}, "unmanaged": true}, {"placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T05:15:58.885677Z", "last_refresh": "2026-03-10T05:17:36.560068Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T05:18:22.729 INFO:tasks.cephadm:mds.foofs has 0/2 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: pgmap v87: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: fsmap foofs:0 
2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: Saving service mds.foofs spec with placement count:2 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.fbzvlk", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.fbzvlk", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T05:18:23.089 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:22 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: pgmap v87: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: Health check failed: 1 filesystem is online with fewer MDS 
than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: fsmap foofs:0 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: Saving service mds.foofs spec with placement count:2 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.fbzvlk", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.fbzvlk", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T05:18:23.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:22 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:23.730 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch ls -f json 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: Deploying daemon mds.foofs.vm00.fbzvlk on vm00 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='client.24315 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 
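[annotation] The caps cephadm requests for each MDS key are visible verbatim in the mon entries above; recreating such a key by hand would look like this (entity name copied from this run):

    ceph auth get-or-create mds.foofs.vm00.fbzvlk \
        mon 'profile mds' \
        osd 'allow rw tag cephfs *=*' \
        mds 'allow'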
2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm03.awymkl", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm03.awymkl", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:24.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:23 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: Deploying daemon mds.foofs.vm00.fbzvlk on vm00 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='client.24315 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm03.awymkl", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm03.awymkl", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T05:18:24.294 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:24.294 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:23 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:24.365 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:24.366 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T05:16:00.303724Z", "last_refresh": "2026-03-10T05:17:36.559959Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T05:15:58.571684Z", "last_refresh": "2026-03-10T05:17:36.559987Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "status": {"created": "2026-03-10T05:15:59.361562Z", "last_refresh": "2026-03-10T05:17:36.560014Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T05:18:21.996028Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T05:18:21.992008Z", "running": 0, "size": 2}}, {"placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T05:15:58.255598Z", "last_refresh": "2026-03-10T05:17:36.559921Z", "running": 2, "size": 2}}, {"placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm03:192.168.123.103=vm03"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T05:16:17.303966Z", "last_refresh": "2026-03-10T05:17:36.559813Z", "running": 2, "size": 2}}, {"placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T05:15:59.847259Z", "last_refresh": "2026-03-10T05:17:36.560042Z", "ports": [9100], "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", "container_image_name": "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", "last_refresh": "2026-03-10T05:17:36.560094Z", "running": 8, "size": 8}, "unmanaged": true}, {"placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T05:15:58.885677Z", "last_refresh": "2026-03-10T05:17:36.560068Z", "ports": [9095], "running": 1, "size": 1}}] 
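[annotation] MDS_ALL_DOWN and MDS_UP_LESS_THAN_MAX are expected transients here: they are raised the moment 'fs new' creates the filesystem and clear as soon as the freshly deployed daemons register, as the entries that follow show. A conservative wait for that settling would be (a sketch, not something the task runs):

    while ceph health detail | grep -Eq 'MDS_ALL_DOWN|MDS_UP_LESS_THAN_MAX'; do
      sleep 5
    done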
2026-03-10T05:18:24.427 INFO:tasks.cephadm:mds.foofs has 0/2 2026-03-10T05:18:25.257 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: Deploying daemon mds.foofs.vm03.awymkl on vm03 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: pgmap v91: 65 pgs: 12 creating+peering, 13 active+clean, 40 unknown; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: mds.? [v2:192.168.123.100:6834/1391835301,v1:192.168.123.100:6835/1391835301] up:boot 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: mds.? [v2:192.168.123.103:6832/3852683437,v1:192.168.123.103:6833/3852683437] up:boot 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: daemon mds.foofs.vm00.fbzvlk assigned to filesystem foofs as rank 0 (now has 1 ranks) 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: Cluster is now healthy 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: fsmap foofs:0 2 up:standby 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata", "who": "foofs.vm00.fbzvlk"}]: dispatch 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: fsmap foofs:1 {0=foofs.vm00.fbzvlk=up:creating} 1 up:standby 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata", "who": "foofs.vm03.awymkl"}]: dispatch 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: daemon mds.foofs.vm00.fbzvlk is now active in filesystem foofs as rank 0 2026-03-10T05:18:25.258 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:24 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: Deploying daemon mds.foofs.vm03.awymkl on vm03 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: pgmap v91: 65 pgs: 12 creating+peering, 13 active+clean, 40 unknown; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: mds.? 
[v2:192.168.123.100:6834/1391835301,v1:192.168.123.100:6835/1391835301] up:boot 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: mds.? [v2:192.168.123.103:6832/3852683437,v1:192.168.123.103:6833/3852683437] up:boot 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: daemon mds.foofs.vm00.fbzvlk assigned to filesystem foofs as rank 0 (now has 1 ranks) 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: Cluster is now healthy 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: fsmap foofs:0 2 up:standby 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata", "who": "foofs.vm00.fbzvlk"}]: dispatch 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: fsmap foofs:1 {0=foofs.vm00.fbzvlk=up:creating} 1 up:standby 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "mds metadata", "who": "foofs.vm03.awymkl"}]: dispatch 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: daemon mds.foofs.vm00.fbzvlk is now active in filesystem foofs as rank 0 2026-03-10T05:18:25.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:24 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:25.428 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- ceph orch ls -f json 2026-03-10T05:18:26.090 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T05:18:26.090 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T05:16:00.303724Z", "last_refresh": "2026-03-10T05:18:25.961213Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T05:15:58.571684Z", "last_refresh": "2026-03-10T05:18:24.833167Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "status": {"created": "2026-03-10T05:15:59.361562Z", "last_refresh": "2026-03-10T05:18:25.961273Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T05:18:21.996028Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T05:18:21.992008Z", "last_refresh": "2026-03-10T05:18:24.833535Z", "running": 2, "size": 2}}, {"placement": {"count": 2}, "service_name": 
"mgr", "service_type": "mgr", "status": {"created": "2026-03-10T05:15:58.255598Z", "last_refresh": "2026-03-10T05:18:24.833296Z", "running": 2, "size": 2}}, {"placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm03:192.168.123.103=vm03"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T05:16:17.303966Z", "last_refresh": "2026-03-10T05:18:24.833334Z", "running": 2, "size": 2}}, {"placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T05:15:59.847259Z", "last_refresh": "2026-03-10T05:18:24.833405Z", "ports": [9100], "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", "container_image_name": "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", "last_refresh": "2026-03-10T05:18:24.833433Z", "running": 8, "size": 8}, "unmanaged": true}, {"placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T05:15:58.885677Z", "last_refresh": "2026-03-10T05:18:25.961351Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T05:18:26.162 INFO:tasks.cephadm:mds.foofs has 2/2 2026-03-10T05:18:26.162 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T05:18:26.170 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-03-10T05:18:26.170 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2' 2026-03-10T05:18:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: from='client.14518 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: mds.? 
[v2:192.168.123.100:6834/1391835301,v1:192.168.123.100:6835/1391835301] up:active 2026-03-10T05:18:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: fsmap foofs:1 {0=foofs.vm00.fbzvlk=up:active} 1 up:standby 2026-03-10T05:18:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: fsmap foofs:1 {0=foofs.vm00.fbzvlk=up:active} 1 up:standby 2026-03-10T05:18:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:26.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:26.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:26.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:25 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: from='client.14518 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: mds.? 
[v2:192.168.123.100:6834/1391835301,v1:192.168.123.100:6835/1391835301] up:active 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: fsmap foofs:1 {0=foofs.vm00.fbzvlk=up:active} 1 up:standby 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: fsmap foofs:1 {0=foofs.vm00.fbzvlk=up:active} 1 up:standby 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:26.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:25 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:27.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:27 vm00 ceph-mon[49980]: pgmap v93: 65 pgs: 1 creating+activating, 12 creating+peering, 41 active+clean, 11 unknown; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 1 op/s 2026-03-10T05:18:27.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:27.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "pool": ".nfs"}]: dispatch 2026-03-10T05:18:27.282 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:27 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:27 vm03 ceph-mon[50983]: pgmap v93: 65 pgs: 1 creating+activating, 12 creating+peering, 41 active+clean, 11 unknown; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 1 op/s 2026-03-10T05:18:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool create", "pool": ".nfs"}]: dispatch 2026-03-10T05:18:27.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:27 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:28.123 INFO:teuthology.orchestra.run.vm00.stdout:NFS Cluster Created Successfully 2026-03-10T05:18:28.185 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'ceph nfs export create cephfs 
--fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake' 2026-03-10T05:18:28.369 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:28 vm00 ceph-mon[49980]: from='client.14522 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:28.369 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:28 vm00 ceph-mon[49980]: from='client.14526 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "placement": "2", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:28.369 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:28 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "pool": ".nfs"}]': finished 2026-03-10T05:18:28.369 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:28 vm00 ceph-mon[49980]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T05:18:28.369 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:28 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-10T05:18:28.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:28 vm03 ceph-mon[50983]: from='client.14522 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T05:18:28.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:28 vm03 ceph-mon[50983]: from='client.14526 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "placement": "2", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:28.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:28 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool create", "pool": ".nfs"}]': finished 2026-03-10T05:18:28.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:28 vm03 ceph-mon[50983]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T05:18:28.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:28 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-10T05:18:28.721 INFO:teuthology.orchestra.run.vm00.stderr:Invalid command: Unexpected argument '--clusterid' 2026-03-10T05:18:28.721 INFO:teuthology.orchestra.run.vm00.stderr:nfs export create cephfs <cluster_id> <pseudo_path> <fsname> [<path>] [--readonly] [--client_addr <value>...]
[--squash <value>] : Create a CephFS export 2026-03-10T05:18:28.721 INFO:teuthology.orchestra.run.vm00.stderr:Error EINVAL: invalid command 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout: "bind": "/fake", 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout: "fs": "foofs", 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout: "path": "/", 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout: "cluster": "foo", 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout: "mode": "RW" 2026-03-10T05:18:29.046 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T05:18:29.115 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a50eb6e-1c40-11f1-854f-9d3053100916 -- bash -c 'while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done' 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: pgmap v95: 97 pgs: 32 unknown, 1 creating+activating, 5 creating+peering, 59 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 198 B/s wr, 4 op/s 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: Saving service nfs.foo spec with placement count:2 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: Creating key for client.nfs.foo.0.0.vm00.qcbcyg 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch
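[annotation] Two things worth noting in this step. First, the task chains two spellings of the export-create command with '||': the first ('--clusterid', '--binding') is rejected by the v17.2.0 image, as the 'Invalid command' above shows, and the second ('--cluster-id', '--pseudo-path') succeeds, so the same task works across the syntax change. Second, the readiness loop 'while ! ceph orch ls | grep nfs | grep 2/2' has no deadline of its own and relies on teuthology's overall job timeout; a variant with an explicit deadline would be (sketch only, not what the task runs):

    deadline=$((SECONDS + 300))
    until ceph orch ls | grep nfs | grep -q 2/2; do
      (( SECONDS < deadline )) || { echo 'nfs.foo never reached 2/2' >&2; exit 1; }
      sleep 5
    done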
2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": 
"client.nfs.foo.0.0.vm00.qcbcyg-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T05:18:29.300 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:29 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:29.360 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: pgmap v95: 97 pgs: 32 unknown, 1 creating+activating, 5 creating+peering, 59 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 198 B/s wr, 4 op/s 2026-03-10T05:18:29.360 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: Saving service nfs.foo spec with placement count:2 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: Creating key for client.nfs.foo.0.0.vm00.qcbcyg 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.qcbcyg-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T05:18:29.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:29 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:18:30.422 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:30 vm00 ceph-mon[49980]: Rados config object exists: conf-nfs.foo 2026-03-10T05:18:30.422 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:30 vm00 ceph-mon[49980]: Creating key for client.nfs.foo.0.0.vm00.qcbcyg-rgw 
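[annotation] 'Ensuring nfs.foo.0 is in the ganesha grace table' refers to ganesha's rados_cluster recovery backend: membership and grace-period state live in shared RADOS objects in the .nfs pool, under a namespace named after the cluster ('foo'), and the mgr registers each daemon's node id there. To inspect that state on such a cluster, something like the following should work (ganesha-rados-grace ships with nfs-ganesha's rados recovery support; the exact flags here are a hedged sketch):

    ganesha-rados-grace --pool .nfs --ns foo dump   # node ids and grace flags
    rados -p .nfs --namespace foo ls                # raw view of the same namespace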
2026-03-10T05:18:30.422 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:30 vm00 ceph-mon[49980]: Deploying daemon nfs.foo.0.0.vm00.qcbcyg on vm00
2026-03-10T05:18:30.423 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:30 vm00 ceph-mon[49980]: osdmap e53: 8 total, 8 up, 8 in
2026-03-10T05:18:30.423 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:30 vm00 ceph-mon[49980]: pgmap v98: 97 pgs: 17 unknown, 3 creating+activating, 5 creating+peering, 72 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s wr, 8 op/s
2026-03-10T05:18:30.423 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:30 vm00 ceph-mon[49980]: from='client.24343 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:30.482 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:30 vm03 ceph-mon[50983]: Rados config object exists: conf-nfs.foo
2026-03-10T05:18:30.482 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:30 vm03 ceph-mon[50983]: Creating key for client.nfs.foo.0.0.vm00.qcbcyg-rgw
2026-03-10T05:18:30.482 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:30 vm03 ceph-mon[50983]: Deploying daemon nfs.foo.0.0.vm00.qcbcyg on vm00
2026-03-10T05:18:30.482 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:30 vm03 ceph-mon[50983]: osdmap e53: 8 total, 8 up, 8 in
2026-03-10T05:18:30.482 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:30 vm03 ceph-mon[50983]: pgmap v98: 97 pgs: 17 unknown, 3 creating+activating, 5 creating+peering, 72 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s wr, 8 op/s
2026-03-10T05:18:30.482 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:30 vm03 ceph-mon[50983]: from='client.24343 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: mgrmap e20: vm00.vnepyw(active, since 115s), standbys: vm03.vqfmrv
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: Creating key for client.nfs.foo.1.0.vm03.hirhdy
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: Ensuring nfs.foo.1 is in the ganesha grace table
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: Rados config object exists: conf-nfs.foo
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: Creating key for client.nfs.foo.1.0.vm03.hirhdy-rgw
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: Deploying daemon nfs.foo.1.0.vm03.hirhdy on vm03
2026-03-10T05:18:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:31 vm03 ceph-mon[50983]: from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: mgrmap e20: vm00.vnepyw(active, since 115s), standbys: vm03.vqfmrv
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: Creating key for client.nfs.foo.1.0.vm03.hirhdy
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: Ensuring nfs.foo.1 is in the ganesha grace table
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: Rados config object exists: conf-nfs.foo
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: Creating key for client.nfs.foo.1.0.vm03.hirhdy-rgw
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm03.hirhdy-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: Deploying daemon nfs.foo.1.0.vm03.hirhdy on vm03
2026-03-10T05:18:31.412 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:31 vm00 ceph-mon[49980]: from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:32.634 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:32 vm00 ceph-mon[49980]: pgmap v99: 97 pgs: 8 unknown, 3 creating+activating, 5 creating+peering, 81 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s wr, 6 op/s
2026-03-10T05:18:32.634 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:32.634 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:18:32.634 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:32.634 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:18:32.634 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:32 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:32.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:32 vm03 ceph-mon[50983]: pgmap v99: 97 pgs: 8 unknown, 3 creating+activating, 5 creating+peering, 81 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s wr, 6 op/s
2026-03-10T05:18:32.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:32.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:18:32.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:32.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:18:32.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:32 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.382 INFO:teuthology.orchestra.run.vm00.stdout:nfs.foo ?:2049 2/2 0s ago 5s count:2
2026-03-10T05:18:33.453 INFO:teuthology.run_tasks:Running task vip.exec...
2026-03-10T05:18:33.455 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm00.local
2026-03-10T05:18:33.455 DEBUG:teuthology.orchestra.run.vm00:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mkdir /mnt/foo'
2026-03-10T05:18:33.486 INFO:teuthology.orchestra.run.vm00.stderr:+ mkdir /mnt/foo
2026-03-10T05:18:33.488 DEBUG:teuthology.orchestra.run.vm00:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done'
2026-03-10T05:18:33.554 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:18:33.554 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.653 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:18:33.930 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:18:33.931 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:18:34.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:34 vm00 ceph-mon[49980]: pgmap v100: 97 pgs: 3 creating+activating, 94 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 3.7 KiB/s wr, 9 op/s
2026-03-10T05:18:34.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:34 vm00 ceph-mon[49980]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:34.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:34 vm03 ceph-mon[50983]: pgmap v100: 97 pgs: 3 creating+activating, 94 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 3.7 KiB/s wr, 9 op/s
2026-03-10T05:18:34.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:34 vm03 ceph-mon[50983]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T05:18:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:18:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:18:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:18:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:18:37.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:36 vm00 ceph-mon[49980]: pgmap v101: 97 pgs: 97 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 2.6 KiB/s rd, 2.9 KiB/s wr, 7 op/s
2026-03-10T05:18:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:36 vm03 ceph-mon[50983]: pgmap v101: 97 pgs: 97 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 2.6 KiB/s rd, 2.9 KiB/s wr, 7 op/s
2026-03-10T05:18:38.933 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:18:38.933 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:18:38.962 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:18:38.962 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:18:39.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:38 vm00 ceph-mon[49980]: pgmap v102: 97 pgs: 97 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 4.5 KiB/s rd, 3.1 KiB/s wr, 8 op/s
2026-03-10T05:18:39.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:38 vm03 ceph-mon[50983]: pgmap v102: 97 pgs: 97 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 4.5 KiB/s rd, 3.1 KiB/s wr, 8 op/s
2026-03-10T05:18:41.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:40 vm00 ceph-mon[49980]: pgmap v103: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 4.1 KiB/s rd, 2.7 KiB/s wr, 7 op/s
2026-03-10T05:18:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:40 vm03 ceph-mon[50983]: pgmap v103: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 4.1 KiB/s rd, 2.7 KiB/s wr, 7 op/s
2026-03-10T05:18:43.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:42 vm00 ceph-mon[49980]: pgmap v104: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 2.2 KiB/s wr, 6 op/s
2026-03-10T05:18:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:42 vm03 ceph-mon[50983]: pgmap v104: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 2.2 KiB/s wr, 6 op/s
2026-03-10T05:18:43.964 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:18:43.965 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:18:43.994 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:18:43.995 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:18:45.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:44 vm00 ceph-mon[49980]: pgmap v105: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 2.3 KiB/s wr, 6 op/s
2026-03-10T05:18:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:44 vm03 ceph-mon[50983]: pgmap v105: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 2.3 KiB/s wr, 6 op/s
2026-03-10T05:18:47.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:46 vm00 ceph-mon[49980]: pgmap v106: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.4 KiB/s wr, 3 op/s
2026-03-10T05:18:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:46 vm03 ceph-mon[50983]: pgmap v106: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.4 KiB/s wr, 3 op/s
2026-03-10T05:18:48.997 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:18:48.997 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:18:49.025 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:18:49.025 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:18:49.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:48 vm00 ceph-mon[49980]: pgmap v107: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1.4 KiB/s wr, 3 op/s
2026-03-10T05:18:49.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:48 vm03 ceph-mon[50983]: pgmap v107: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1.4 KiB/s wr, 3 op/s
2026-03-10T05:18:51.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:50 vm00 ceph-mon[49980]: pgmap v108: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 853 B/s wr, 1 op/s
2026-03-10T05:18:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:50 vm03 ceph-mon[50983]: pgmap v108: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 853 B/s wr, 1 op/s
2026-03-10T05:18:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:52 vm00 ceph-mon[49980]: pgmap v109: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:18:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:52 vm03 ceph-mon[50983]: pgmap v109: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:18:54.027 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:18:54.027 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:18:54.053 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:18:54.054 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:18:55.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:54 vm00 ceph-mon[49980]: pgmap v110: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:18:55.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:54 vm03 ceph-mon[50983]: pgmap v110: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:18:57.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:56 vm00 ceph-mon[49980]: pgmap v111: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:18:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:56 vm03 ceph-mon[50983]: pgmap v111: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:18:59.055 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:18:59.056 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:18:59.082 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:18:59.082 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:18:59.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:18:58 vm00 ceph-mon[49980]: pgmap v112: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:18:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:18:58 vm03 ceph-mon[50983]: pgmap v112: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:01.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:00 vm00 ceph-mon[49980]: pgmap v113: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:01.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:00 vm03 ceph-mon[50983]: pgmap v113: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:03.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:02 vm00 ceph-mon[49980]: pgmap v114: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:02 vm03 ceph-mon[50983]: pgmap v114: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:04.084 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:04.084 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:04.113 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:04.114 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:05.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:04 vm00 ceph-mon[49980]: pgmap v115: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:04 vm03 ceph-mon[50983]: pgmap v115: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:07.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:07 vm00 ceph-mon[49980]: pgmap v116: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:07 vm03 ceph-mon[50983]: pgmap v116: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:09.115 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:09.116 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:09.141 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:09.141 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:09.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:09 vm00 ceph-mon[49980]: pgmap v117: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:09 vm03 ceph-mon[50983]: pgmap v117: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:11.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:11 vm00 ceph-mon[49980]: pgmap v118: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:11 vm03 ceph-mon[50983]: pgmap v118: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:13.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:13 vm00 ceph-mon[49980]: pgmap v119: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:13 vm03 ceph-mon[50983]: pgmap v119: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:14.143 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:14.143 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:14.170 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:14.171 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:15.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:15 vm00 ceph-mon[49980]: pgmap v120: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:15 vm03 ceph-mon[50983]: pgmap v120: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:17.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:17 vm00 ceph-mon[49980]: pgmap v121: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:17 vm03 ceph-mon[50983]: pgmap v121: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:18.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:18 vm00 ceph-mon[49980]: pgmap v122: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:18.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:18 vm03 ceph-mon[50983]: pgmap v122: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:19.173 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:19.173 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:19.199 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:19.200 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:20.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:20 vm00 ceph-mon[49980]: pgmap v123: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:20.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:20 vm03 ceph-mon[50983]: pgmap v123: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:22.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:22 vm00 ceph-mon[49980]: pgmap v124: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:22.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:22 vm03 ceph-mon[50983]: pgmap v124: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:24.201 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:24.202 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:24.235 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:24.235 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:24.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:24 vm00 ceph-mon[49980]: pgmap v125: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:24 vm03 ceph-mon[50983]: pgmap v125: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:26.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:26 vm00 ceph-mon[49980]: pgmap v126: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:26.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:26 vm03 ceph-mon[50983]: pgmap v126: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:28.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:28 vm00 ceph-mon[49980]: pgmap v127: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:28.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:28 vm03 ceph-mon[50983]: pgmap v127: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:29.237 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:29.237 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:29.264 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:29.265 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:30.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:30 vm00 ceph-mon[49980]: pgmap v128: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:30.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:30 vm03 ceph-mon[50983]: pgmap v128: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:32.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:32 vm00 ceph-mon[49980]: pgmap v129: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:32.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:32 vm03 ceph-mon[50983]: pgmap v129: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:33.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:19:33.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:19:33.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:19:33.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:19:33.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:33 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:19:33.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:19:33.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:19:33.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:19:33.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:19:33.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:33 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:19:34.266 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:34.266 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:34.293 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:34.293 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:34.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:34 vm00 ceph-mon[49980]: pgmap v130: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:34.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:34 vm03 ceph-mon[50983]: pgmap v130: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]: dispatch
2026-03-10T05:19:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.b", "id": [1, 2]}]: dispatch
2026-03-10T05:19:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 5]}]: dispatch
2026-03-10T05:19:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch
2026-03-10T05:19:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:19:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]: dispatch
2026-03-10T05:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.b", "id": [1, 2]}]: dispatch
2026-03-10T05:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 5]}]: dispatch
2026-03-10T05:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch
2026-03-10T05:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:19:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:36 vm00 ceph-mon[49980]: pgmap v131: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]': finished
2026-03-10T05:19:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.b", "id": [1, 2]}]': finished
2026-03-10T05:19:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 5]}]': finished
2026-03-10T05:19:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished
2026-03-10T05:19:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:36 vm00 ceph-mon[49980]: osdmap e54: 8 total, 8 up, 8 in
2026-03-10T05:19:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:36 vm03 ceph-mon[50983]: pgmap v131: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:19:36.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]': finished
2026-03-10T05:19:36.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.b", "id": [1, 2]}]': finished
2026-03-10T05:19:36.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 5]}]': finished
2026-03-10T05:19:36.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished
2026-03-10T05:19:36.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:36 vm03 ceph-mon[50983]: osdmap e54: 8 total, 8 up, 8 in
2026-03-10T05:19:37.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:37 vm00 ceph-mon[49980]: osdmap e55: 8 total, 8 up, 8 in
2026-03-10T05:19:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:37 vm03 ceph-mon[50983]: osdmap e55: 8 total, 8 up, 8 in
2026-03-10T05:19:38.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:38 vm00 ceph-mon[49980]: pgmap v134: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 127 B/s wr, 0 op/s
2026-03-10T05:19:38.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:38 vm00 ceph-mon[49980]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)
2026-03-10T05:19:38.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:38 vm03 ceph-mon[50983]: pgmap v134: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 127 B/s wr, 0 op/s
2026-03-10T05:19:38.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:38 vm03 ceph-mon[50983]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)
2026-03-10T05:19:39.295 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:19:39.296 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:19:39.322 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:19:39.323 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:19:40.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:40 vm00 ceph-mon[49980]: pgmap v135: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:40.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:40 vm03 ceph-mon[50983]: pgmap v135: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:19:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:41 vm03 ceph-mon[50983]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T05:19:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:41 vm03 ceph-mon[50983]: Cluster is now healthy
2026-03-10T05:19:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:41 vm00 ceph-mon[49980]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T05:19:42.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:41 vm00 ceph-mon[49980]: Cluster is now healthy
2026-03-10T05:19:42.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:42 vm03 ceph-mon[50983]: pgmap v136: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 127 B/s wr, 0 op/s
2026-03-10T05:19:43.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:42 vm00 ceph-mon[49980]: pgmap v136: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 127 B/s wr, 0 op/s 2026-03-10T05:19:44.324 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:19:44.325 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:19:44.350 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:19:44.351 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:19:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:44 vm03 ceph-mon[50983]: pgmap v137: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:19:45.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:44 vm00 ceph-mon[49980]: pgmap v137: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:19:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:46 vm03 ceph-mon[50983]: pgmap v138: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 210 B/s wr, 0 op/s 2026-03-10T05:19:47.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:46 vm00 ceph-mon[49980]: pgmap v138: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 210 B/s wr, 0 op/s 2026-03-10T05:19:49.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:48 vm00 ceph-mon[49980]: pgmap v139: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 190 B/s wr, 0 op/s 2026-03-10T05:19:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:48 vm03 ceph-mon[50983]: pgmap v139: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 190 B/s wr, 0 op/s 2026-03-10T05:19:49.352 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:19:49.353 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:19:49.378 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:19:49.378 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:19:51.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:50 vm00 ceph-mon[49980]: pgmap v140: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:19:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:50 vm03 ceph-mon[50983]: pgmap v140: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:19:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:52 vm00 ceph-mon[49980]: pgmap v141: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:19:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:52 vm03 ceph-mon[50983]: pgmap v141: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:19:54.380 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:19:54.380 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:19:54.407 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:19:54.408 INFO:teuthology.orchestra.run.vm00.stderr:+ 
sleep 5 2026-03-10T05:19:55.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:54 vm00 ceph-mon[49980]: pgmap v142: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:19:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:54 vm03 ceph-mon[50983]: pgmap v142: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:19:57.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:56 vm00 ceph-mon[49980]: pgmap v143: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:19:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:56 vm03 ceph-mon[50983]: pgmap v143: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:19:59.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:19:58 vm00 ceph-mon[49980]: pgmap v144: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:19:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:19:58 vm03 ceph-mon[50983]: pgmap v144: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:19:59.409 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:19:59.410 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:19:59.437 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:19:59.437 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:20:01.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:00 vm00 ceph-mon[49980]: pgmap v145: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:01.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:00 vm00 ceph-mon[49980]: overall HEALTH_OK 2026-03-10T05:20:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:00 vm03 ceph-mon[50983]: pgmap v145: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:00 vm03 ceph-mon[50983]: overall HEALTH_OK 2026-03-10T05:20:03.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:02 vm00 ceph-mon[49980]: pgmap v146: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:02 vm03 ceph-mon[50983]: pgmap v146: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:04.439 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:20:04.439 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:20:04.468 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:20:04.469 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:20:05.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:04 vm00 ceph-mon[49980]: pgmap v147: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:04 vm03 ceph-mon[50983]: 
pgmap v147: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:07.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:06 vm00 ceph-mon[49980]: pgmap v148: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:06 vm03 ceph-mon[50983]: pgmap v148: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:09.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:08 vm00 ceph-mon[49980]: pgmap v149: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:08 vm03 ceph-mon[50983]: pgmap v149: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:09.470 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:20:09.471 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:20:09.497 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:20:09.497 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:20:11.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:10 vm00 ceph-mon[49980]: pgmap v150: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:10 vm03 ceph-mon[50983]: pgmap v150: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:13.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:12 vm00 ceph-mon[49980]: pgmap v151: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:12 vm03 ceph-mon[50983]: pgmap v151: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:14.499 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:20:14.499 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:20:14.526 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:20:14.527 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:20:15.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:14 vm00 ceph-mon[49980]: pgmap v152: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:14 vm03 ceph-mon[50983]: pgmap v152: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:20:17.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:20:16 vm00 ceph-mon[49980]: pgmap v153: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:20:17.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:20:16 vm03 ceph-mon[50983]: pgmap v153: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
[... 2026-03-10T05:20:19 through 2026-03-10T05:23:31: the same four-line vm00 retry block (++ hostname; + mount -t nfs vm00:/fake /mnt/foo -o sync; mount.nfs: mount system call failed; + sleep 5) repeats every ~5 s, interleaved with two-second pgmap heartbeats v154-v250 from both mons (97 pgs active+clean; 453 KiB data, 52-53 MiB used, 160 GiB / 160 GiB avail) and the mgr's once-a-minute "config dump" / "config generate-minimal-conf" / "auth get" and mgr/rbd_support "config rm" dispatches ...]
ceph-mon[50983]: pgmap v250: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:33.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:32 vm00 ceph-mon[49980]: pgmap v251: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:32 vm03 ceph-mon[50983]: pgmap v251: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:34 vm00 ceph-mon[49980]: pgmap v252: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:23:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:23:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:23:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:23:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:23:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:34 vm03 ceph-mon[50983]: pgmap v252: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:23:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:23:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:23:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:23:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:23:35.763 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:23:35.763 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:23:35.788 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:23:35.789 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:23:36.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:23:36.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:23:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:23:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:23:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:36 vm00 ceph-mon[49980]: pgmap v253: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:36 vm03 ceph-mon[50983]: pgmap v253: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:39.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:38 vm03 ceph-mon[50983]: pgmap v254: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:38 vm00 ceph-mon[49980]: pgmap v254: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:40.791 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:23:40.791 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:23:40.820 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:23:40.821 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:23:41.220 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:40 vm00 ceph-mon[49980]: pgmap v255: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:40 vm03 ceph-mon[50983]: pgmap v255: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:42 vm00 ceph-mon[49980]: pgmap v256: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:42 vm03 ceph-mon[50983]: pgmap v256: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:44 vm00 ceph-mon[49980]: pgmap v257: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:45.306 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:44 vm03 ceph-mon[50983]: pgmap v257: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:45.823 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:23:45.823 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:23:45.849 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:23:45.850 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:23:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:46 vm00 ceph-mon[49980]: pgmap v258: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:46 vm03 ceph-mon[50983]: pgmap v258: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:48 vm00 ceph-mon[49980]: pgmap v259: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:48 vm03 ceph-mon[50983]: pgmap v259: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:50.851 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:23:50.852 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:23:50.878 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:23:50.878 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:23:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:50 vm00 ceph-mon[49980]: pgmap v260: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:50 vm03 ceph-mon[50983]: pgmap v260: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:52 vm00 ceph-mon[49980]: pgmap v261: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:52 vm03 ceph-mon[50983]: pgmap v261: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:54 vm00 ceph-mon[49980]: pgmap v262: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:54 vm03 ceph-mon[50983]: pgmap v262: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:23:55.879 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:23:55.880 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:23:55.908 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:23:55.909 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 
2026-03-10T05:23:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:56 vm00 ceph-mon[49980]: pgmap v263: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:56 vm03 ceph-mon[50983]: pgmap v263: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:23:58 vm00 ceph-mon[49980]: pgmap v264: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:23:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:23:58 vm03 ceph-mon[50983]: pgmap v264: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:00.910 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:00.910 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:00.935 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:00.935 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:00 vm00 ceph-mon[49980]: pgmap v265: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:00 vm03 ceph-mon[50983]: pgmap v265: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:02 vm00 ceph-mon[49980]: pgmap v266: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:02 vm03 ceph-mon[50983]: pgmap v266: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:04 vm00 ceph-mon[49980]: pgmap v267: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:04 vm03 ceph-mon[50983]: pgmap v267: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:05.936 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:05.937 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:05.962 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:05.963 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:06 vm00 ceph-mon[49980]: pgmap v268: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:06 vm03 ceph-mon[50983]: pgmap v268: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:08 vm00 ceph-mon[49980]: pgmap v269: 97 pgs: 97 
active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:08 vm03 ceph-mon[50983]: pgmap v269: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:10.964 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:10.965 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:10.990 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:10.990 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:10 vm00 ceph-mon[49980]: pgmap v270: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:10 vm03 ceph-mon[50983]: pgmap v270: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:12 vm00 ceph-mon[49980]: pgmap v271: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:12 vm03 ceph-mon[50983]: pgmap v271: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:14 vm00 ceph-mon[49980]: pgmap v272: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:14 vm03 ceph-mon[50983]: pgmap v272: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:15.992 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:15.992 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:16.017 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:16.018 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:16 vm00 ceph-mon[49980]: pgmap v273: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:16 vm03 ceph-mon[50983]: pgmap v273: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:18 vm00 ceph-mon[49980]: pgmap v274: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:18 vm03 ceph-mon[50983]: pgmap v274: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:21.019 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:21.020 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:21.045 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: 
mount system call failed 2026-03-10T05:24:21.046 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:20 vm00 ceph-mon[49980]: pgmap v275: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:20 vm03 ceph-mon[50983]: pgmap v275: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:22 vm00 ceph-mon[49980]: pgmap v276: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:22 vm03 ceph-mon[50983]: pgmap v276: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:24 vm00 ceph-mon[49980]: pgmap v277: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:24 vm03 ceph-mon[50983]: pgmap v277: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:26.047 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:26.047 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:26.072 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:26.072 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:26 vm00 ceph-mon[49980]: pgmap v278: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:26 vm03 ceph-mon[50983]: pgmap v278: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:28 vm00 ceph-mon[49980]: pgmap v279: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:28 vm03 ceph-mon[50983]: pgmap v279: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:31.074 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:31.074 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:31.099 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:31.099 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:30 vm00 ceph-mon[49980]: pgmap v280: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:30 vm03 ceph-mon[50983]: pgmap v280: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:33.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:32 vm00 ceph-mon[49980]: pgmap v281: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:32 vm03 ceph-mon[50983]: pgmap v281: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:35 vm00 ceph-mon[49980]: pgmap v282: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:24:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:24:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:24:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:24:35.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:24:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:35 vm03 ceph-mon[50983]: pgmap v282: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:24:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:24:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:24:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:24:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:24:36.101 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:36.101 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:36.128 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:36.129 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:24:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:24:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:24:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:24:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:37 vm00 ceph-mon[49980]: pgmap v283: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:37 vm03 ceph-mon[50983]: pgmap v283: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:39 vm00 ceph-mon[49980]: pgmap v284: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:39 vm03 ceph-mon[50983]: pgmap v284: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:41.130 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:41.131 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:41.156 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:41.156 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:41 vm00 ceph-mon[49980]: pgmap v285: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:41 vm03 ceph-mon[50983]: pgmap v285: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:43 vm00 ceph-mon[49980]: pgmap v286: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:43 vm03 ceph-mon[50983]: pgmap v286: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:45 vm00 ceph-mon[49980]: pgmap v287: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:45 vm03 ceph-mon[50983]: pgmap v287: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-10T05:24:46.157 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:46.158 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:46.184 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:46.185 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:47 vm00 ceph-mon[49980]: pgmap v288: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:47 vm03 ceph-mon[50983]: pgmap v288: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:49 vm03 ceph-mon[50983]: pgmap v289: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:49.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:49 vm00 ceph-mon[49980]: pgmap v289: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:51.186 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:51.187 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:51.213 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:51.213 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:51 vm03 ceph-mon[50983]: pgmap v290: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:51 vm00 ceph-mon[49980]: pgmap v290: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:53 vm00 ceph-mon[49980]: pgmap v291: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:53 vm03 ceph-mon[50983]: pgmap v291: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:55 vm00 ceph-mon[49980]: pgmap v292: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:55 vm03 ceph-mon[50983]: pgmap v292: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:24:56.215 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:24:56.215 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:24:56.240 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:24:56.241 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:24:56.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:56 vm00 ceph-mon[49980]: pgmap v293: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 
op/s 2026-03-10T05:24:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:56 vm03 ceph-mon[50983]: pgmap v293: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:58.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:24:58 vm00 ceph-mon[49980]: pgmap v294: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:24:58.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:24:58 vm03 ceph-mon[50983]: pgmap v294: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:00.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:00 vm00 ceph-mon[49980]: pgmap v295: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:00.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:00 vm03 ceph-mon[50983]: pgmap v295: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:01.242 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:01.243 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:01.268 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:01.269 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:02.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:02 vm03 ceph-mon[50983]: pgmap v296: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:02.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:02 vm00 ceph-mon[49980]: pgmap v296: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:04.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:04 vm03 ceph-mon[50983]: pgmap v297: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:04.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:04 vm00 ceph-mon[49980]: pgmap v297: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:06.271 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:06.271 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:06.296 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:06.296 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:06.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:06 vm03 ceph-mon[50983]: pgmap v298: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:06 vm00 ceph-mon[49980]: pgmap v298: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:08 vm03 ceph-mon[50983]: pgmap v299: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:08.666 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:08 vm00 ceph-mon[49980]: pgmap v299: 97 pgs: 97 
active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:10.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:10 vm03 ceph-mon[50983]: pgmap v300: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:10.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:10 vm00 ceph-mon[49980]: pgmap v300: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:11.297 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:11.298 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:11.325 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:11.325 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:12.583 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:12 vm00 ceph-mon[49980]: pgmap v301: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:12.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:12 vm03 ceph-mon[50983]: pgmap v301: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:14.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:14 vm00 ceph-mon[49980]: pgmap v302: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:14 vm03 ceph-mon[50983]: pgmap v302: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:16.326 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:16.327 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:16.353 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:16.354 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:16.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:16 vm00 ceph-mon[49980]: pgmap v303: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:16 vm03 ceph-mon[50983]: pgmap v303: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:18.667 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:18 vm00 ceph-mon[49980]: pgmap v304: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:18.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:18 vm03 ceph-mon[50983]: pgmap v304: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:20.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:20 vm00 ceph-mon[49980]: pgmap v305: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:20.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:20 vm03 ceph-mon[50983]: pgmap v305: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:21.355 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:21.356 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:21.380 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:21.380 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:22.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:22 vm00 ceph-mon[49980]: pgmap v306: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:22 vm03 ceph-mon[50983]: pgmap v306: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:24.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:24 vm00 ceph-mon[49980]: pgmap v307: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:24 vm03 ceph-mon[50983]: pgmap v307: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:26.382 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:26.382 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:26.407 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:26.407 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:26.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:26 vm00 ceph-mon[49980]: pgmap v308: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:26.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:26 vm03 ceph-mon[50983]: pgmap v308: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:28.667 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:28 vm00 ceph-mon[49980]: pgmap v309: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:28.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:28 vm03 ceph-mon[50983]: pgmap v309: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:30.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:30 vm00 ceph-mon[49980]: pgmap v310: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:30.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:30 vm03 ceph-mon[50983]: pgmap v310: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:31.408 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:31.409 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:31.435 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:31.435 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:32.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:32 vm00 ceph-mon[49980]: pgmap v311: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T05:25:32.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:32 vm03 ceph-mon[50983]: pgmap v311: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:34.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:34 vm03 ceph-mon[50983]: pgmap v312: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:34.631 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:34 vm00 ceph-mon[49980]: pgmap v312: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:25:36.436 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:36.437 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:36.462 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:36.462 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:36 vm03 ceph-mon[50983]: pgmap v313: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:36 vm00 ceph-mon[49980]: pgmap v313: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:38 vm00 ceph-mon[49980]: pgmap v314: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:38 vm03 ceph-mon[50983]: pgmap v314: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:40 vm03 ceph-mon[50983]: pgmap v315: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:40 vm00 ceph-mon[49980]: pgmap v315: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:41.463 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:41.464 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:41.490 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:41.490 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:43.081 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:42 vm00 ceph-mon[49980]: pgmap v316: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:42 vm03 ceph-mon[50983]: pgmap v316: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:44 vm00 ceph-mon[49980]: pgmap v317: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:44 vm03 ceph-mon[50983]: pgmap v317: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:46.491 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:46.492 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 
2026-03-10T05:25:46.516 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:46.517 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:46 vm00 ceph-mon[49980]: pgmap v318: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:46 vm03 ceph-mon[50983]: pgmap v318: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:48 vm00 ceph-mon[49980]: pgmap v319: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:48 vm03 ceph-mon[50983]: pgmap v319: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:50 vm00 ceph-mon[49980]: pgmap v320: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:50 vm03 ceph-mon[50983]: pgmap v320: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:51.518 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:51.520 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:51.545 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:51.545 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:52 vm00 ceph-mon[49980]: pgmap v321: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:52 vm03 ceph-mon[50983]: pgmap v321: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:54 vm00 ceph-mon[49980]: pgmap v322: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:54 vm03 ceph-mon[50983]: pgmap v322: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:25:56.546 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:25:56.547 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:25:56.572 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:25:56.572 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:25:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:56 vm00 ceph-mon[49980]: pgmap v323: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:56 vm03 ceph-mon[50983]: pgmap v323: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:25:58 vm00 ceph-mon[49980]: pgmap v324: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:25:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:25:58 vm03 ceph-mon[50983]: pgmap v324: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:00 vm00 ceph-mon[49980]: pgmap v325: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:00 vm03 ceph-mon[50983]: pgmap v325: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:01.573 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:01.574 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:01.599 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:01.600 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:02 vm00 ceph-mon[49980]: pgmap v326: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:02 vm03 ceph-mon[50983]: pgmap v326: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:04 vm00 ceph-mon[49980]: pgmap v327: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:04 vm03 ceph-mon[50983]: pgmap v327: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:06.602 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:06.603 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:06.628 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:06.629 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:06 vm00 ceph-mon[49980]: pgmap v328: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:06 vm03 ceph-mon[50983]: pgmap v328: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:08 vm00 ceph-mon[49980]: pgmap v329: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:08 vm03 ceph-mon[50983]: pgmap v329: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:10 vm00 
ceph-mon[49980]: pgmap v330: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:10 vm03 ceph-mon[50983]: pgmap v330: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:11.631 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:11.647 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:11.696 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:11.697 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:13.247 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:12 vm00 ceph-mon[49980]: pgmap v331: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:12 vm03 ceph-mon[50983]: pgmap v331: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:14 vm00 ceph-mon[49980]: pgmap v332: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:14 vm03 ceph-mon[50983]: pgmap v332: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:16.698 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:16.699 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:16.726 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:16.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:16 vm00 ceph-mon[49980]: pgmap v333: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:16 vm03 ceph-mon[50983]: pgmap v333: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:18 vm00 ceph-mon[49980]: pgmap v334: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:18 vm03 ceph-mon[50983]: pgmap v334: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:20 vm00 ceph-mon[49980]: pgmap v335: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:20 vm03 ceph-mon[50983]: pgmap v335: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:21.728 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:21.728 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:21.753 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:21.754 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:22 vm00 ceph-mon[49980]: pgmap v336: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:22 vm03 ceph-mon[50983]: pgmap v336: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:24 vm00 ceph-mon[49980]: pgmap v337: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:24 vm03 ceph-mon[50983]: pgmap v337: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:26.755 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:26.756 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:26.781 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:26.781 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:26 vm00 ceph-mon[49980]: pgmap v338: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:26 vm03 ceph-mon[50983]: pgmap v338: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:28 vm00 ceph-mon[49980]: pgmap v339: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:28 vm03 ceph-mon[50983]: pgmap v339: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:30 vm00 ceph-mon[49980]: pgmap v340: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:30 vm03 ceph-mon[50983]: pgmap v340: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:31.783 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:31.783 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:31.808 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:31.808 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:32 vm00 ceph-mon[49980]: pgmap v341: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:32 vm03 ceph-mon[50983]: pgmap v341: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
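Annotation: between mount attempts, both mons (vm00 and vm03) mirror the mgr's pgmap digest roughly every two seconds: 97 PGs, all active+clean, 453 KiB of data, 53 MiB used of 160 GiB raw. RADOS stays healthy for the entire retry window, which points the failure at the NFS service rather than the cluster. The same digest can be pulled on demand instead of scraping the journal; a small sketch, assuming a working `ceph` CLI on an admin host:

    # On-demand equivalents of the recurring pgmap journal lines.
    ceph pg stat   # one-line summary, e.g. "97 pgs: 97 active+clean; 453 KiB data, ..."
    ceph df        # breaks the data/used/avail figures down per pool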
170 B/s wr, 0 op/s 2026-03-10T05:26:35.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:34 vm00 ceph-mon[49980]: pgmap v342: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:35.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:26:35.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:26:35.084 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:34 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:26:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:34 vm03 ceph-mon[50983]: pgmap v342: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:26:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:26:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:34 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:26:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:26:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:26:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:26:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:26:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:26:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:26:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:26:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:36 vm00 ceph-mon[49980]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:26:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:26:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:26:36.809 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:36.810 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:36.839 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:36.839 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:37 vm03 ceph-mon[50983]: pgmap v343: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:37 vm00 ceph-mon[49980]: pgmap v343: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:39 vm03 ceph-mon[50983]: pgmap v344: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:39 vm00 ceph-mon[49980]: pgmap v344: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:41 vm03 ceph-mon[50983]: pgmap v345: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:41 vm00 ceph-mon[49980]: pgmap v345: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:41.841 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:41.841 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:41.866 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:41.866 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:43 vm03 ceph-mon[50983]: pgmap v346: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:43.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:43 vm00 ceph-mon[49980]: pgmap v346: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:45.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:45 vm00 ceph-mon[49980]: pgmap v347: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:45 vm03 ceph-mon[50983]: pgmap v347: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:46.868 
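Annotation: the `cmd=[...]: dispatch` entries above are the active mgr's periodic housekeeping, logged by both mons: the rbd_support module clears its per-instance `mirror_snapshot_schedule` and `trash_purge_schedule` keys, while cephadm re-reads the config table, regenerates the minimal client config, and fetches the admin keyring it distributes to hosts. The same read-only queries can be issued by hand to see what the mgr is working from; a sketch, assuming admin credentials are available:

    # Replaying the mgr's read-only queries from the dispatch log by hand.
    ceph config dump                    # the config table the mgr just dumped
    ceph config generate-minimal-conf   # the minimal ceph.conf cephadm pushes out
    ceph auth get client.admin          # the admin keyring cephadm distributes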
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:46.869 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:46.895 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:46.895 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:47 vm00 ceph-mon[49980]: pgmap v348: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:47 vm03 ceph-mon[50983]: pgmap v348: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:49.359 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:49 vm03 ceph-mon[50983]: pgmap v349: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:49.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:49 vm00 ceph-mon[49980]: pgmap v349: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:51 vm00 ceph-mon[49980]: pgmap v350: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:51 vm03 ceph-mon[50983]: pgmap v350: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:51.897 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:51.897 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:51.922 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:51.922 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:53 vm00 ceph-mon[49980]: pgmap v351: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:53 vm03 ceph-mon[50983]: pgmap v351: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:55 vm00 ceph-mon[49980]: pgmap v352: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:55 vm03 ceph-mon[50983]: pgmap v352: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:26:56.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:56 vm00 ceph-mon[49980]: pgmap v353: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:56 vm03 ceph-mon[50983]: pgmap v353: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:56.924 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:26:56.924 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00:/fake /mnt/foo -o sync 2026-03-10T05:26:56.950 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:26:56.950 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:26:58.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:26:58 vm03 ceph-mon[50983]: pgmap v354: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:26:58.677 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:26:58 vm00 ceph-mon[49980]: pgmap v354: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:00.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:00 vm00 ceph-mon[49980]: pgmap v355: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:00.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:00 vm03 ceph-mon[50983]: pgmap v355: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:01.952 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:01.953 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:01.981 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:01.982 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:02.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:02 vm00 ceph-mon[49980]: pgmap v356: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:02.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:02 vm03 ceph-mon[50983]: pgmap v356: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:04.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:04 vm00 ceph-mon[49980]: pgmap v357: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:04.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:04 vm03 ceph-mon[50983]: pgmap v357: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:06 vm00 ceph-mon[49980]: pgmap v358: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:06 vm03 ceph-mon[50983]: pgmap v358: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:06.983 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:06.984 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:07.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:07.011 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:08.677 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:08 vm00 ceph-mon[49980]: pgmap v359: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:08.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:08 vm03 ceph-mon[50983]: pgmap v359: 97 pgs: 97 active+clean; 453 KiB data, 
53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:10.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:10 vm00 ceph-mon[49980]: pgmap v360: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:10.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:10 vm03 ceph-mon[50983]: pgmap v360: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:12.012 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:12.013 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:12.042 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:12.043 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:12.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:12 vm00 ceph-mon[49980]: pgmap v361: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:12.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:12 vm03 ceph-mon[50983]: pgmap v361: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:14.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:14 vm00 ceph-mon[49980]: pgmap v362: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:14 vm03 ceph-mon[50983]: pgmap v362: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:16.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:16 vm00 ceph-mon[49980]: pgmap v363: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:16 vm03 ceph-mon[50983]: pgmap v363: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:17.044 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:17.045 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:17.071 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:17.071 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:18.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:18 vm00 ceph-mon[49980]: pgmap v364: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:18.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:18 vm03 ceph-mon[50983]: pgmap v364: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:20.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:20 vm00 ceph-mon[49980]: pgmap v365: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:20.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:20 vm03 ceph-mon[50983]: pgmap v365: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:22.073 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:22.074 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:22.103 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:22.103 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:23.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:22 vm00 ceph-mon[49980]: pgmap v366: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:22 vm03 ceph-mon[50983]: pgmap v366: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:24 vm00 ceph-mon[49980]: pgmap v367: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:24 vm03 ceph-mon[50983]: pgmap v367: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:27.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:26 vm00 ceph-mon[49980]: pgmap v368: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:27.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:26 vm03 ceph-mon[50983]: pgmap v368: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:27.105 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:27.106 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:27.132 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:27.132 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:28 vm00 ceph-mon[49980]: pgmap v369: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:28 vm03 ceph-mon[50983]: pgmap v369: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:30 vm00 ceph-mon[49980]: pgmap v370: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:30 vm03 ceph-mon[50983]: pgmap v370: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:32.134 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:32.137 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:32.160 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:32.161 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:32 vm00 ceph-mon[49980]: pgmap v371: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T05:27:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:32 vm03 ceph-mon[50983]: pgmap v371: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:34 vm00 ceph-mon[49980]: pgmap v372: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:34 vm03 ceph-mon[50983]: pgmap v372: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:27:35.697 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:27:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:36 vm00 ceph-mon[49980]: pgmap v373: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:27:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:27:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:36 vm03 ceph-mon[50983]: pgmap v373: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:27:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:27:37.163 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:37.164 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:37.190 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:37.191 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:38 vm00 ceph-mon[49980]: pgmap v374: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:38 vm03 ceph-mon[50983]: pgmap v374: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:40 vm03 ceph-mon[50983]: pgmap v375: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:40 vm00 ceph-mon[49980]: pgmap v375: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:42.193 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:42.193 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:42.218 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:42.219 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:42 vm03 ceph-mon[50983]: pgmap v376: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s 
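Annotation: alongside the housekeeping above, the mgr dispatches `config rm` for `osd_memory_target` under the host mask `osd/host:vm00` (and, further down, `osd/host:vm03`). This is cephadm's OSD memory autotuning reconciling per-host overrides: when it computes that no override should be in effect on a host, it removes the key. The override state can be inspected, or the same removal issued manually, with the mask syntax seen in the log; host names below are taken from this run:

    # Inspecting the per-host override the mgr keeps reconciling.
    ceph config get osd osd_memory_target           # effective value for OSDs
    ceph config rm osd/host:vm00 osd_memory_target  # the removal the mgr dispatched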
rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:43.082 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:42 vm00 ceph-mon[49980]: pgmap v376: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:44 vm03 ceph-mon[50983]: pgmap v377: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:44 vm00 ceph-mon[49980]: pgmap v377: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:46 vm03 ceph-mon[50983]: pgmap v378: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:47.220 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:47.220 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:47.246 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:47.246 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:46 vm00 ceph-mon[49980]: pgmap v378: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:48 vm00 ceph-mon[49980]: pgmap v379: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:48 vm03 ceph-mon[50983]: pgmap v379: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:50 vm00 ceph-mon[49980]: pgmap v380: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:50 vm03 ceph-mon[50983]: pgmap v380: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:52.248 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:52.248 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:52.274 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:52.275 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:52 vm00 ceph-mon[49980]: pgmap v381: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:52 vm03 ceph-mon[50983]: pgmap v381: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:54 vm00 ceph-mon[49980]: pgmap v382: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:55.309 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:54 vm03 ceph-mon[50983]: pgmap 
v382: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:27:57.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:27:57.277 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:27:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:56 vm00 ceph-mon[49980]: pgmap v383: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:57.303 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:27:57.304 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:27:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:56 vm03 ceph-mon[50983]: pgmap v383: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:27:58 vm00 ceph-mon[49980]: pgmap v384: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:27:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:27:58 vm03 ceph-mon[50983]: pgmap v384: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:00 vm00 ceph-mon[49980]: pgmap v385: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:00 vm03 ceph-mon[50983]: pgmap v385: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:02.305 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:02.306 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:02.335 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:02.335 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:02 vm00 ceph-mon[49980]: pgmap v386: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:02 vm03 ceph-mon[50983]: pgmap v386: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:04 vm00 ceph-mon[49980]: pgmap v387: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:04 vm03 ceph-mon[50983]: pgmap v387: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:06 vm00 ceph-mon[49980]: pgmap v388: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:06 vm03 ceph-mon[50983]: pgmap v388: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T05:28:07.337 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:07.337 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:07.365 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:07.366 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:08 vm00 ceph-mon[49980]: pgmap v389: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:08 vm03 ceph-mon[50983]: pgmap v389: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:10 vm00 ceph-mon[49980]: pgmap v390: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:10 vm03 ceph-mon[50983]: pgmap v390: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:12.367 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:12.368 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:12.395 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:12.396 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:12 vm00 ceph-mon[49980]: pgmap v391: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:12 vm03 ceph-mon[50983]: pgmap v391: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:14 vm00 ceph-mon[49980]: pgmap v392: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:14 vm03 ceph-mon[50983]: pgmap v392: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:16 vm00 ceph-mon[49980]: pgmap v393: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:16 vm03 ceph-mon[50983]: pgmap v393: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:17.398 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:17.398 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:17.426 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:17.427 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:18 vm00 ceph-mon[49980]: pgmap v394: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 
op/s 2026-03-10T05:28:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:18 vm03 ceph-mon[50983]: pgmap v394: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:20 vm00 ceph-mon[49980]: pgmap v395: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:20 vm03 ceph-mon[50983]: pgmap v395: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:22.429 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:22.429 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:22.457 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:22.458 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:22 vm00 ceph-mon[49980]: pgmap v396: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:22 vm03 ceph-mon[50983]: pgmap v396: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:24 vm00 ceph-mon[49980]: pgmap v397: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:24 vm03 ceph-mon[50983]: pgmap v397: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:28:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:27 vm00 ceph-mon[49980]: pgmap v398: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:27 vm03 ceph-mon[50983]: pgmap v398: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:27.459 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:27.460 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:27.530 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:27.531 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:28.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:28 vm00 ceph-mon[49980]: pgmap v399: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:28.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:28 vm03 ceph-mon[50983]: pgmap v399: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:30.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:30 vm00 ceph-mon[49980]: pgmap v400: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:30.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:30 vm03 ceph-mon[50983]: pgmap v400: 97 pgs: 97 
active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:32.533 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:28:32.533 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:28:32.560 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:28:32.560 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:28:32.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:32 vm00 ceph-mon[49980]: pgmap v401: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:28:32.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:32 vm03 ceph-mon[50983]: pgmap v401: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:28:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:34 vm00 ceph-mon[49980]: pgmap v402: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:34 vm03 ceph-mon[50983]: pgmap v402: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:28:36.033 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:28:36.033 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:28:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:28:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:28:37.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:37 vm00 ceph-mon[49980]: pgmap v403: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:28:37.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:28:37.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:28:37.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:37 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:28:37.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:37 vm03 ceph-mon[50983]: pgmap v403: 97 
pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T05:28:37.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:37 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:28:37.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:37 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:28:37.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:37 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:28:37.567 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:28:37.568 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:28:37.619 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:28:37.620 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:28:38.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:38.534 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:28:38.534 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:38.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:38.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:28:38.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:39.457 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:39 vm03 ceph-mon[50983]: pgmap v404: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T05:28:39.457 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:39.457 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:39 vm00 ceph-mon[49980]: pgmap v404: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T05:28:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:28:40.628 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:40 vm03 ceph-mon[50983]: pgmap v405: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:40.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:40 vm00 ceph-mon[49980]: pgmap v405: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:42.622 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:28:42.622 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:28:42.741 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:28:42.741 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:28:42.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:42 vm00 ceph-mon[49980]: pgmap v406: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:42.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:42 vm03 ceph-mon[50983]: pgmap v406: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:45.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:44 vm00 ceph-mon[49980]: pgmap v407: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:28:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:44 vm03 ceph-mon[50983]: pgmap v407: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:28:47.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:46 vm00 ceph-mon[49980]: pgmap v408: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:46 vm03 ceph-mon[50983]: pgmap v408: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:47.743 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:28:47.744 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:28:47.770 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:28:47.771 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:28:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:48 vm00 ceph-mon[49980]: pgmap v409: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:48 vm03 ceph-mon[50983]: pgmap v409: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:50.889 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:50 vm03 ceph-mon[50983]: pgmap v410: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:28:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:50 vm00 ceph-mon[49980]: pgmap v410: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:28:52.773 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:28:52.773 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:28:52.798 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:28:52.799 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:28:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:52 vm00 ceph-mon[49980]: pgmap v411: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:52 vm03 ceph-mon[50983]: pgmap v411: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:54 vm00 ceph-mon[49980]: pgmap v412: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:28:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:54 vm03 ceph-mon[50983]: pgmap v412: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:28:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:56 vm00 ceph-mon[49980]: pgmap v413: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:56 vm03 ceph-mon[50983]: pgmap v413: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:57.801 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:28:57.801 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:28:57.827 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:28:57.828 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:28:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:28:58 vm00 ceph-mon[49980]: pgmap v414: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:28:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:28:58 vm03 ceph-mon[50983]: pgmap v414: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:00 vm00 ceph-mon[49980]: pgmap v415: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:00 vm03 ceph-mon[50983]: pgmap v415: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:02.830 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:02.830 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:02.856 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:02.856 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:02 vm03 ceph-mon[50983]: pgmap v416: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:03.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:02 vm00 ceph-mon[49980]: pgmap v416: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:04 vm03 ceph-mon[50983]: pgmap v417: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:04 vm00 ceph-mon[49980]: pgmap v417: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:06 vm00 ceph-mon[49980]: pgmap v418: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:06 vm03 ceph-mon[50983]: pgmap v418: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:07.858 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:07.858 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:07.883 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:07.884 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:08 vm00 ceph-mon[49980]: pgmap v419: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:09.108 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:08 vm03 ceph-mon[50983]: pgmap v419: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:10 vm00 ceph-mon[49980]: pgmap v420: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:10 vm03 ceph-mon[50983]: pgmap v420: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:12.885 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:12.886 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:12.977 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:12.978 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:13.229 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:12 vm00 ceph-mon[49980]: pgmap v421: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:12 vm03 ceph-mon[50983]: pgmap v421: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:14 vm00 ceph-mon[49980]: pgmap v422: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:14 vm03 ceph-mon[50983]: pgmap v422: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:16 vm00 ceph-mon[49980]: pgmap v423: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:16 vm03 ceph-mon[50983]: pgmap v423: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:17.979 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:17.980 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:18.006 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:18.007 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:18 vm00 ceph-mon[49980]: pgmap v424: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:18 vm03 ceph-mon[50983]: pgmap v424: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:20 vm00 ceph-mon[49980]: pgmap v425: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:20 vm03 ceph-mon[50983]: pgmap v425: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:23.008 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:23.009 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:23.072 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:23.073 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:23 vm00 ceph-mon[49980]: pgmap v426: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:23 vm03 ceph-mon[50983]: pgmap v426: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:25 vm00 ceph-mon[49980]: pgmap v427: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:25 vm03 ceph-mon[50983]: pgmap v427: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:27 vm00 ceph-mon[49980]: pgmap v428: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:27 vm03 ceph-mon[50983]: pgmap v428: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:28.075 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:28.075 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:28.101 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:28.102 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:29 vm00 ceph-mon[49980]: pgmap v429: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:29 vm03 ceph-mon[50983]: pgmap v429: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:31 vm00 ceph-mon[49980]: pgmap v430: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:31 vm03 ceph-mon[50983]: pgmap v430: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:33.103 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:33.104 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:33.130 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:33.130 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:33 vm03 ceph-mon[50983]: pgmap v431: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:33 vm00 ceph-mon[49980]: pgmap v431: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:35 vm03 ceph-mon[50983]: pgmap v432: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:35 vm00 ceph-mon[49980]: pgmap v432: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:29:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:29:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:29:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:29:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:37 vm03 ceph-mon[50983]: pgmap v433: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:37 vm00 ceph-mon[49980]: pgmap v433: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:38.132 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:38.132 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:38.166 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:38.166 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:39 vm03 ceph-mon[50983]: pgmap v434: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:29:39.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:29:39.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:29:39.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:29:39.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:29:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:39 vm00 ceph-mon[49980]: pgmap v434: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:29:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:29:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:29:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:29:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:29:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:41 vm00 ceph-mon[49980]: pgmap v435: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:41 vm03 ceph-mon[50983]: pgmap v435: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:42.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:42 vm00 ceph-mon[49980]: pgmap v436: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:42.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:42 vm03 ceph-mon[50983]: pgmap v436: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:43.168 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:43.169 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:43.194 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:43.194 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:44.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:44 vm00 ceph-mon[49980]: pgmap v437: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:44 vm03 ceph-mon[50983]: pgmap v437: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:46 vm00 ceph-mon[49980]: pgmap v438: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:46 vm03 ceph-mon[50983]: pgmap v438: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:48.196 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:48.196 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:48.222 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:48.223 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:48.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:48 vm00 ceph-mon[49980]: pgmap v439: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:48.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:48 vm03 ceph-mon[50983]: pgmap v439: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:50 vm03 ceph-mon[50983]: pgmap v440: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:50 vm00 ceph-mon[49980]: pgmap v440: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:53.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:52 vm00 ceph-mon[49980]: pgmap v441: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:53.224 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:53.225 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:53.253 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:53.253 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:52 vm03 ceph-mon[50983]: pgmap v441: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:54 vm00 ceph-mon[49980]: pgmap v442: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:54 vm03 ceph-mon[50983]: pgmap v442: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:29:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:56 vm00 ceph-mon[49980]: pgmap v443: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:56 vm03 ceph-mon[50983]: pgmap v443: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:58.255 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:29:58.255 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:29:58.282 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:29:58.283 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:29:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:29:58 vm00 ceph-mon[49980]: pgmap v444: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:29:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:29:58 vm03 ceph-mon[50983]: pgmap v444: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:00 vm00 ceph-mon[49980]: pgmap v445: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:00 vm00 ceph-mon[49980]: overall HEALTH_OK
2026-03-10T05:30:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:00 vm03 ceph-mon[50983]: pgmap v445: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:00 vm03 ceph-mon[50983]: overall HEALTH_OK
2026-03-10T05:30:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:02 vm00 ceph-mon[49980]: pgmap v446: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:03.284 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:03.285 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:02 vm03 ceph-mon[50983]: pgmap v446: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:03.311 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:03.311 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:04 vm00 ceph-mon[49980]: pgmap v447: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:04 vm03 ceph-mon[50983]: pgmap v447: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:07 vm00 ceph-mon[49980]: pgmap v448: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:07 vm03 ceph-mon[50983]: pgmap v448: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:08.313 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:08.313 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:08.338 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:08.339 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:09.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:09 vm03 ceph-mon[50983]: pgmap v449: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:09 vm00 ceph-mon[49980]: pgmap v449: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:11.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:11 vm00 ceph-mon[49980]: pgmap v450: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:11 vm03 ceph-mon[50983]: pgmap v450: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:13.340 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:13.340 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:13.365 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:13.366 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:13 vm00 ceph-mon[49980]: pgmap v451: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:13 vm03 ceph-mon[50983]: pgmap v451: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:15 vm00 ceph-mon[49980]: pgmap v452: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:15 vm03 ceph-mon[50983]: pgmap v452: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:17 vm00 ceph-mon[49980]: pgmap v453: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:17 vm03 ceph-mon[50983]: pgmap v453: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:18.367 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:18.368 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:18.393 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:18.393 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:19 vm00 ceph-mon[49980]: pgmap v454: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:19 vm03 ceph-mon[50983]: pgmap v454: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:20 vm00 ceph-mon[49980]: pgmap v455: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:20 vm03 ceph-mon[50983]: pgmap v455: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:22 vm00 ceph-mon[49980]: pgmap v456: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:22 vm03 ceph-mon[50983]: pgmap v456: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:23.395 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:23.395 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:23.420 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:23.420 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:24 vm00 ceph-mon[49980]: pgmap v457: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:24 vm03 ceph-mon[50983]: pgmap v457: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:27.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:26 vm00 ceph-mon[49980]: pgmap v458: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:27.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:26 vm03 ceph-mon[50983]: pgmap v458: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:28.421 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:28.422 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:28.447 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:28.448 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:28 vm00 ceph-mon[49980]: pgmap v459: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:28 vm03 ceph-mon[50983]: pgmap v459: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:30 vm00 ceph-mon[49980]: pgmap v460: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:30 vm03 ceph-mon[50983]: pgmap v460: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:32 vm00 ceph-mon[49980]: pgmap v461: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:32 vm03 ceph-mon[50983]: pgmap v461: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:33.449 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:33.450 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:33.475 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:33.476 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:34 vm00 ceph-mon[49980]: pgmap v462: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:34 vm03 ceph-mon[50983]: pgmap v462: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:30:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:30:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:30:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:30:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:36 vm00 ceph-mon[49980]: pgmap v463: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:36 vm03 ceph-mon[50983]: pgmap v463: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:38.477 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:38.478 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:38.506 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:38.511 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:38.720 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:38 vm03 ceph-mon[50983]: pgmap v464: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:38.720 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:30:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:38 vm00 ceph-mon[49980]: pgmap v464: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:30:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:30:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:30:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:30:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:30:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:30:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:30:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:30:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:30:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:40 vm00 ceph-mon[49980]: pgmap v465: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:40 vm03 ceph-mon[50983]: pgmap v465: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:42 vm00 ceph-mon[49980]: pgmap v466: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:42 vm03 ceph-mon[50983]: pgmap v466: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:43.513 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:43.513 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:43.544 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:43.544 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:44 vm00 ceph-mon[49980]: pgmap v467: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:44 vm03 ceph-mon[50983]: pgmap v467: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:47.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:46 vm00 ceph-mon[49980]: pgmap v468: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:46 vm03 ceph-mon[50983]: pgmap v468: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:48.546 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:48.546 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:48.574 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:48.574 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:48 vm00 ceph-mon[49980]: pgmap v469: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:48 vm03 ceph-mon[50983]: pgmap v469: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:50 vm00 ceph-mon[49980]: pgmap v470: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:50 vm03 ceph-mon[50983]: pgmap v470: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:52 vm00 ceph-mon[49980]: pgmap v471: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:52 vm03 ceph-mon[50983]: pgmap v471: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:53.575 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:53.576 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:53.603 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:53.604 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:54 vm00 ceph-mon[49980]: pgmap v472: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:54 vm03 ceph-mon[50983]: pgmap v472: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:30:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:56 vm00 ceph-mon[49980]: pgmap v473: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:56 vm03 ceph-mon[50983]: pgmap v473: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:58.605 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:30:58.605 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:30:58.631 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:30:58.631 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:30:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:30:58 vm00 ceph-mon[49980]: pgmap v474: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:30:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:30:58 vm03 ceph-mon[50983]: pgmap v474: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:00 vm00 ceph-mon[49980]: pgmap v475: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:00 vm03 ceph-mon[50983]: pgmap v475: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:02 vm03 ceph-mon[50983]: pgmap v476: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:02 vm00 ceph-mon[49980]: pgmap v476: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:03.633 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:03.633 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:03.659 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:03.659 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:04 vm03 ceph-mon[50983]: pgmap v477: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:04 vm00 ceph-mon[49980]: pgmap v477: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:06 vm03 ceph-mon[50983]: pgmap v478: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:06 vm00 ceph-mon[49980]: pgmap v478: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:08.661 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:08.661 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:08.688 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:08.688 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:08 vm00 ceph-mon[49980]: pgmap v479: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:08 vm03 ceph-mon[50983]: pgmap v479: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:10 vm03 ceph-mon[50983]: pgmap v480: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:10 vm00 ceph-mon[49980]: pgmap v480: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:13.082 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:12 vm00 ceph-mon[49980]: pgmap v481: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:12 vm03 ceph-mon[50983]: pgmap v481: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:13.690 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:13.690 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:13.719 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:13.720 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:14 vm00 ceph-mon[49980]: pgmap v482: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:14 vm03 ceph-mon[50983]: pgmap v482: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:16 vm00 ceph-mon[49980]: pgmap v483: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:16 vm03 ceph-mon[50983]: pgmap v483: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:18.721 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:18.722 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:18.747 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:18.747 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:18 vm00 ceph-mon[49980]: pgmap v484: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:18 vm03 ceph-mon[50983]: pgmap v484: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:20 vm00 ceph-mon[49980]: pgmap v485: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:20 vm03 ceph-mon[50983]: pgmap v485: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:22 vm00 ceph-mon[49980]: pgmap v486: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:22 vm03 ceph-mon[50983]: pgmap v486: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:23.749 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:23.749 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:23.774 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:23.775 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:24 vm00 ceph-mon[49980]: pgmap v487: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:24 vm03 ceph-mon[50983]: pgmap v487: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:26 vm00 ceph-mon[49980]: pgmap v488: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:26 vm03 ceph-mon[50983]: pgmap v488: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:28.776 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:28.777 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:28.802 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:28.802 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:28 vm00 ceph-mon[49980]: pgmap v489: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:28 vm03 ceph-mon[50983]: pgmap v489: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:30 vm00 ceph-mon[49980]: pgmap v490: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:30 vm03 ceph-mon[50983]: pgmap v490: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:32 vm00 ceph-mon[49980]: pgmap v491: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:32 vm03 ceph-mon[50983]: pgmap v491: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:33.804 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:33.804 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:33.830 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:33.830 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:34 vm00 ceph-mon[49980]: pgmap v492: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:34 vm03 ceph-mon[50983]: pgmap v492: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:31:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:31:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:31:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:31:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:36 vm00 ceph-mon[49980]: pgmap v493: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:36 vm03 ceph-mon[50983]: pgmap v493: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:38.832 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:38.832 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:38.883 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:38.884 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:39.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:38 vm00 ceph-mon[49980]: pgmap v494: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:39.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:31:39.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:31:39.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:38 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:31:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:38 vm03 ceph-mon[50983]: pgmap v494: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:39.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:31:39.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:31:39.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:38 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:31:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:31:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:31:40.305 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:31:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:31:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:40 vm00 ceph-mon[49980]: pgmap v495: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:41.305 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:40 vm03 ceph-mon[50983]: pgmap v495: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:43 vm00 ceph-mon[49980]: pgmap v496: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:43 vm03 ceph-mon[50983]: pgmap v496: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:43.885 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:43.886 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:43.913 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:43.913 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:45 vm00 ceph-mon[49980]: pgmap v497: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:45 vm03 ceph-mon[50983]: pgmap v497: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:47.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:47 vm00 ceph-mon[49980]: pgmap v498: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:47.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:47 vm03 ceph-mon[50983]: pgmap v498: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:48.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:48 vm00 ceph-mon[49980]: pgmap v499: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:48.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:48 vm03 ceph-mon[50983]: pgmap v499: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:48.914 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:48.915 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:48.942 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:48.942 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:50 vm00 ceph-mon[49980]: pgmap v500: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:50 vm03 ceph-mon[50983]: pgmap v500: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:52 vm00 ceph-mon[49980]: pgmap v501: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:52 vm03 ceph-mon[50983]: pgmap v501: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:53.943 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:53.944 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:53.969 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:53.970 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:54 vm00 ceph-mon[49980]: pgmap v502: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:54 vm03 ceph-mon[50983]: pgmap v502: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:31:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:56 vm00 ceph-mon[49980]: pgmap v503: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:56 vm03 ceph-mon[50983]: pgmap v503: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:58.971 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:31:58.972 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:31:58.997 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:31:58.998 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:31:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:31:58 vm00 ceph-mon[49980]: pgmap v504: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:31:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:31:58 vm03 ceph-mon[50983]: pgmap v504: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:00 vm00 ceph-mon[49980]: pgmap v505: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:01.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:00 vm03 ceph-mon[50983]: pgmap v505: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:02 vm00 ceph-mon[49980]: pgmap v506: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:02 vm03 ceph-mon[50983]: pgmap v506: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:03.999 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:04.000 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:04.024 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:04.024 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:04 vm00 ceph-mon[49980]: pgmap v507: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:04 vm03 ceph-mon[50983]: pgmap v507: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:06 vm00 ceph-mon[49980]: pgmap v508: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:06 vm03 ceph-mon[50983]: pgmap v508: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:09.025 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:09.026 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:09.052 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:09.053 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:08 vm00 ceph-mon[49980]: pgmap v509: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:08 vm03 ceph-mon[50983]: pgmap v509: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:10 vm00 ceph-mon[49980]: pgmap v510: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:10 vm03 ceph-mon[50983]: pgmap v510: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:12 vm00 ceph-mon[49980]: pgmap v511: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:12 vm03 ceph-mon[50983]: pgmap v511: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:14.054 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:14.055 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:14.113 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:14.113 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:15 vm00 ceph-mon[49980]: pgmap v512: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:15 vm03 ceph-mon[50983]: pgmap v512: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:16 vm00 ceph-mon[49980]: pgmap v513: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:16 vm03 ceph-mon[50983]: pgmap v513: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:18.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:18 vm00 ceph-mon[49980]: pgmap v514: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:18.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:18 vm03 ceph-mon[50983]: pgmap v514: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:19.115 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:19.116 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:19.142 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:19.143 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:20 vm00 ceph-mon[49980]: pgmap v515: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:20 vm03 ceph-mon[50983]: pgmap v515: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:22 vm00 ceph-mon[49980]: pgmap v516: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:22 vm03 ceph-mon[50983]: pgmap v516: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:24.144 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:24.145 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:24.170 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:24.171 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:24 vm00 ceph-mon[49980]: pgmap v517: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:24 vm03 ceph-mon[50983]: pgmap v517: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:26 vm00 ceph-mon[49980]: pgmap v518: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:26 vm03 ceph-mon[50983]: pgmap v518: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:28 vm00 ceph-mon[49980]: pgmap v519: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:29.172 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:29.173 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:29.197 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:29.198 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:28 vm03 ceph-mon[50983]: pgmap v519: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:30 vm00 ceph-mon[49980]: pgmap v520: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:30 vm03 ceph-mon[50983]: pgmap v520: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:32 vm00 ceph-mon[49980]: pgmap v521: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:32 vm03 ceph-mon[50983]: pgmap v521: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:34.200 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:34.200 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:34.225 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:34.226 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:34 vm00 ceph-mon[49980]: pgmap v522: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:34 vm03 ceph-mon[50983]: pgmap v522: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:32:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:32:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:32:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:32:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:36 vm00 ceph-mon[49980]: pgmap v523: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:36 vm03 ceph-mon[50983]: pgmap v523: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:38.977 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:38 vm00 ceph-mon[49980]: pgmap v524: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:39.158 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:38 vm03 ceph-mon[50983]: pgmap v524: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:39.227 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:39.227 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:39.254 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:39.254 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:32:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:32:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:32:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:32:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:32:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:32:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:32:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:32:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:32:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:32:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:40 vm00 ceph-mon[49980]: pgmap v525: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:40 vm03 ceph-mon[50983]: pgmap v525: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:42 vm00 ceph-mon[49980]: pgmap v526: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:42 vm03 ceph-mon[50983]: pgmap v526: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:44.255 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:44.256 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:44.283 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:44.283 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:44 vm00 ceph-mon[49980]: pgmap v527: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:44 vm03 ceph-mon[50983]: pgmap v527: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:46 vm00 ceph-mon[49980]: pgmap v528: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:46 vm03 ceph-mon[50983]: pgmap v528: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:48 vm00 ceph-mon[49980]: pgmap v529: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:49.285 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:49.285 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:48 vm03 ceph-mon[50983]: pgmap v529: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:49.311 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:49.312 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:50 vm00 ceph-mon[49980]: pgmap v530: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:50 vm03 ceph-mon[50983]: pgmap v530: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:52 vm00 ceph-mon[49980]: pgmap v531: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:52 vm03 ceph-mon[50983]: pgmap v531: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:54.313 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:54.314 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:54.341 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:54.341 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:54 vm00 ceph-mon[49980]: pgmap v532: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:55.305 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:54 vm03 ceph-mon[50983]: pgmap v532: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:32:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:56 vm00 ceph-mon[49980]: pgmap v533: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:56 vm03 ceph-mon[50983]: pgmap v533: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:59.343 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:32:59.343 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:32:59.389 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:32:59.389 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:32:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:32:59 vm00 ceph-mon[49980]: pgmap v534: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:32:59.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:32:59 vm03 ceph-mon[50983]: pgmap v534: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:00 vm00 ceph-mon[49980]: pgmap v535: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:00 vm03 ceph-mon[50983]: pgmap v535: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:02 vm00 ceph-mon[49980]: pgmap v536: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:03.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:02 vm03 ceph-mon[50983]: pgmap v536: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:04.391 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:04.391 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:04.417 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:04.418 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:04 vm00 ceph-mon[49980]: pgmap v537: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:04 vm03 ceph-mon[50983]: pgmap v537: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:06 vm00 ceph-mon[49980]: pgmap v538: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:06 vm03 ceph-mon[50983]: pgmap v538: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:08 vm00 ceph-mon[49980]: pgmap v539: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:08 vm03 ceph-mon[50983]: pgmap v539: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:09.419 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:09.420 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:09.446 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:09.447 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:10 vm00 ceph-mon[49980]: pgmap v540: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:10 vm03 ceph-mon[50983]: pgmap v540: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:12 vm00 ceph-mon[49980]: pgmap v541: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:12 vm03 ceph-mon[50983]: pgmap v541: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:14.448 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:14.449 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:14.481 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:14.481 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:14 vm00 ceph-mon[49980]: pgmap v542: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:14 vm03 ceph-mon[50983]: pgmap v542: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:16 vm00 ceph-mon[49980]: pgmap v543: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:16 vm03 ceph-mon[50983]: pgmap v543: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:18 vm00 ceph-mon[49980]: pgmap v544: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:18 vm03 ceph-mon[50983]: pgmap v544: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:19.482 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:19.483 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:19.509 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:19.510 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:20 vm00 ceph-mon[49980]: pgmap v545: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:20 vm03 ceph-mon[50983]: pgmap v545: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:22 vm00 ceph-mon[49980]: pgmap v546: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:22 vm03 ceph-mon[50983]: pgmap v546: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:24.511 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:24.512 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:24.537 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:24.537 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:24 vm00 ceph-mon[49980]: pgmap v547: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:24 vm03 ceph-mon[50983]: pgmap v547: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:26 vm00 ceph-mon[49980]: pgmap v548: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:26 vm03 ceph-mon[50983]: pgmap v548: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:28 vm00 ceph-mon[49980]: pgmap v549: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:28 vm03 ceph-mon[50983]: pgmap v549: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:29.538 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:29.539 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:29.565 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:29.565 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:30 vm00 ceph-mon[49980]: pgmap v550: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:30 vm03 ceph-mon[50983]: pgmap v550: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:32 vm00 ceph-mon[49980]: pgmap v551: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:32 vm03 ceph-mon[50983]: pgmap v551: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:34.567 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:34.567 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:34.592 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:34.593 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:34 vm00 ceph-mon[49980]: pgmap v552: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:34 vm03 ceph-mon[50983]: pgmap v552: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:33:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:33:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:33:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:33:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:36 vm00 ceph-mon[49980]: pgmap v553: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:36 vm03 ceph-mon[50983]: pgmap v553: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:38 vm00 ceph-mon[49980]: pgmap v554: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:39.198 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:38 vm03 ceph-mon[50983]: pgmap v554: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:39.594 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:39.595 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:39.619 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:39.620 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:33:40.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:33:40.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:33:40.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:33:40.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:39 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:33:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:33:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:33:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:33:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:33:40.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:33:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:40 vm00 ceph-mon[49980]: pgmap v555: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:40 vm03 ceph-mon[50983]: pgmap v555: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:42 vm00 ceph-mon[49980]: pgmap v556: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:42 vm03 ceph-mon[50983]: pgmap v556: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:44.622 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:44.622 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:44.649 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:44.649 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:44 vm00 ceph-mon[49980]: pgmap v557: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:44 vm03 ceph-mon[50983]: pgmap v557: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:47 vm00 ceph-mon[49980]: pgmap v558: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:47 vm03 ceph-mon[50983]: pgmap v558: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:49 vm00 ceph-mon[49980]: pgmap v559: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:49 vm03 ceph-mon[50983]: pgmap v559: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:49.651 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:49.651 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:49.677 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:49.677 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:51 vm00 ceph-mon[49980]: pgmap v560: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:51 vm03 ceph-mon[50983]: pgmap v560: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:53 vm00 ceph-mon[49980]: pgmap v561: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:53 vm03 ceph-mon[50983]: pgmap v561: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:54.679 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:54.679 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:54.707 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:54.707 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:33:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:55 vm00 ceph-mon[49980]: pgmap v562: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:55 vm03 ceph-mon[50983]: pgmap v562: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:33:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:57 vm03 ceph-mon[50983]: pgmap v563: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:57 vm00 ceph-mon[49980]: pgmap v563: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:33:59 vm03 ceph-mon[50983]: pgmap v564: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:33:59 vm00 ceph-mon[49980]: pgmap v564: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:33:59.709 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:33:59.709 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:33:59.735 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:33:59.735 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:34:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:01 vm03 ceph-mon[50983]: pgmap v565: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:01 vm00 ceph-mon[49980]: pgmap v565: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:03 vm03 ceph-mon[50983]: pgmap v566: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:03.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:03 vm00 ceph-mon[49980]: pgmap v566: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:04.736 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:34:04.737 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:34:04.763 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:34:04.764 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:34:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:05 vm00 ceph-mon[49980]: pgmap v567: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:05 vm03 ceph-mon[50983]: pgmap v567: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:07 vm00 ceph-mon[49980]: pgmap v568: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:07 vm03 ceph-mon[50983]: pgmap v568: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:09.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:09 vm03 ceph-mon[50983]: pgmap v569: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:09 vm00 ceph-mon[49980]: pgmap v569: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:09.765 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:34:09.765 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:34:09.790 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:34:09.791 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:34:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:11 vm00 ceph-mon[49980]: pgmap v570: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:11 vm03 ceph-mon[50983]: pgmap v570: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:13 vm00 ceph-mon[49980]: pgmap v571: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:13 vm03 ceph-mon[50983]: pgmap v571: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:14.792 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:34:14.792 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:34:14.818 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:34:14.819 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:34:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:15 vm00 ceph-mon[49980]: pgmap v572: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:15 vm03 ceph-mon[50983]: pgmap v572: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:17 vm00 ceph-mon[49980]: pgmap v573: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:17 vm03 ceph-mon[50983]: pgmap v573: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:19 vm00 ceph-mon[49980]: pgmap v574: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:19 vm03 ceph-mon[50983]: pgmap v574: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:19.820 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:34:19.821 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:34:19.847 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:34:19.847 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:34:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:21 vm00 ceph-mon[49980]: pgmap v575: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:21 vm03 ceph-mon[50983]: pgmap v575: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:23 vm00 ceph-mon[49980]: pgmap v576: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:23 vm03 ceph-mon[50983]: pgmap v576: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:24.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:24 vm00 ceph-mon[49980]: pgmap v577: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:24.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:24 vm03 ceph-mon[50983]: pgmap v577: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:34:24.849 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:34:24.849 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:34:24.875 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:34:24.875 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:34:27.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:26 vm00 ceph-mon[49980]: pgmap v578: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:27.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:26 vm03 ceph-mon[50983]: pgmap v578: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:28 vm00 ceph-mon[49980]: pgmap v579: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:28 vm03 ceph-mon[50983]: pgmap v579: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:34:29.877 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:34:29.877 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:34:29.904 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:34:29.905 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[journalctl@ceph.mon.vm00/vm03: pgmap v580-v584 continue on both mons, still 97 active+clean; the mount retry fails again at 05:34:34.906]
2026-03-10T05:34:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:34:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:34:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[vm03's mon journals the same two rbd_support config rm dispatches at 05:34:36.056; the pair recurs roughly once a minute, at 05:35:36, 05:36:36 and 05:37:36 below]
2026-03-10T05:34:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:34:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:34:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:34:39 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[vm00's mon journals the same config dump / generate-minimal-conf / auth get trio at 05:34:40.280; on each mon the trio is followed by two bare from='mgr.14214 ...' entity='mgr.vm00.vnepyw' audit lines, and the whole burst also recurs roughly once a minute]
[05:34:41-05:35:31: pgmap v585-v610 continue on both mons, 97 active+clean throughout; used space ticks from 53 MiB to 54 MiB at v595 (05:35:00); the mount retry keeps failing every 5 s, at 05:34:39.935, 05:34:44.963, 05:34:49.992, 05:34:55.020, 05:35:00.049, 05:35:05.076, 05:35:10.104, 05:35:15.133, 05:35:20.163, 05:35:25.193 and 05:35:30.221]
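The pgmap lines that both mons journal every ~2 s carry the same placement-group summary that the standard CLI prints on demand; assuming an admin shell on host.a, a one-off equivalent would be:

    # One-off equivalent of the recurring pgmap journal lines (standard
    # `ceph pg stat`; the output comment is abridged from the log above).
    ceph pg stat
    # 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail

Both mons journal each pgmap version, so every summary appears twice, once from vm00 and once from vm03.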
[05:35:33-05:36:19: pgmap v611-v634 continue on both mons, 97 active+clean, 54 MiB used; the rbd_support config rm pair recurs at 05:35:36 and the config dump / generate-minimal-conf / auth get burst at 05:35:39; mount retries fail at 05:35:35.249, 05:35:40.277, 05:35:45.305, 05:35:50.333, 05:35:55.361, 05:36:00.389, 05:36:05.416, 05:36:10.444 and 05:36:15.489]
2026-03-10T05:36:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:20 vm00 ceph-mon[49980]: pgmap v635: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
[05:36:21-05:36:43: pgmap v635-v646 show used space stepping from 54 MiB to 58 MiB but stay 97 active+clean on both mons; mount retries fail at 05:36:20.517, 05:36:25.546, 05:36:30.574, 05:36:35.603 and 05:36:40.630; the rbd_support config rm pair recurs at 05:36:36 and the config dump / generate-minimal-conf / auth get burst at 05:36:40, this time followed by three bare from='mgr.14214 ...' audit lines on each mon]
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:45 vm00 ceph-mon[49980]: pgmap v647: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:36:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:45 vm03 ceph-mon[50983]: pgmap v647: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:36:45.658 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:36:45.659 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:36:45.684 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:36:45.685 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:36:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:47 vm00 ceph-mon[49980]: pgmap v648: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:47 vm03 ceph-mon[50983]: pgmap v648: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:49.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:49 vm00 ceph-mon[49980]: pgmap v649: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:49.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:49 vm03 ceph-mon[50983]: pgmap v649: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:50.686 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:36:50.687 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:36:50.713 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:36:50.713 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:36:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:51 vm00 ceph-mon[49980]: pgmap v650: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:36:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:51 vm03 ceph-mon[50983]: pgmap v650: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:36:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:53 vm00 ceph-mon[49980]: pgmap v651: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:53 vm03 ceph-mon[50983]: pgmap v651: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:54.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:54 vm00 ceph-mon[49980]: pgmap v652: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:36:54.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:54 vm03 ceph-mon[50983]: pgmap v652: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:36:55.715 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:36:55.715 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00:/fake /mnt/foo -o sync 2026-03-10T05:36:55.741 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:36:55.741 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:36:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:56 vm00 ceph-mon[49980]: pgmap v653: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:56 vm03 ceph-mon[50983]: pgmap v653: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:36:58 vm00 ceph-mon[49980]: pgmap v654: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:36:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:36:58 vm03 ceph-mon[50983]: pgmap v654: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:00.742 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:00.743 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:00.771 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:00.771 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:00 vm00 ceph-mon[49980]: pgmap v655: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:00 vm03 ceph-mon[50983]: pgmap v655: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:02 vm00 ceph-mon[49980]: pgmap v656: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:02 vm03 ceph-mon[50983]: pgmap v656: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:04 vm00 ceph-mon[49980]: pgmap v657: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:04 vm03 ceph-mon[50983]: pgmap v657: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:05.773 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:05.773 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:05.799 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:05.799 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:06 vm00 ceph-mon[49980]: pgmap v658: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:06 vm03 ceph-mon[50983]: pgmap v658: 97 pgs: 97 active+clean; 453 KiB data, 
58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:08 vm00 ceph-mon[49980]: pgmap v659: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:08 vm03 ceph-mon[50983]: pgmap v659: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:10.801 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:10.801 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:10.827 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:10.827 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:10 vm00 ceph-mon[49980]: pgmap v660: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:10 vm03 ceph-mon[50983]: pgmap v660: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:12 vm00 ceph-mon[49980]: pgmap v661: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:12 vm03 ceph-mon[50983]: pgmap v661: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:14 vm00 ceph-mon[49980]: pgmap v662: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:14 vm03 ceph-mon[50983]: pgmap v662: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:15.828 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:15.829 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:15.854 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:15.854 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:16 vm00 ceph-mon[49980]: pgmap v663: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:16 vm03 ceph-mon[50983]: pgmap v663: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:18 vm00 ceph-mon[49980]: pgmap v664: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:18 vm03 ceph-mon[50983]: pgmap v664: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:20.856 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:20.856 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:20.887 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:20.888 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:20 vm00 ceph-mon[49980]: pgmap v665: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:20 vm03 ceph-mon[50983]: pgmap v665: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:22 vm00 ceph-mon[49980]: pgmap v666: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:22 vm03 ceph-mon[50983]: pgmap v666: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:24 vm00 ceph-mon[49980]: pgmap v667: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:24 vm03 ceph-mon[50983]: pgmap v667: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:37:25.889 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:25.889 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:25.917 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:25.917 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:26 vm00 ceph-mon[49980]: pgmap v668: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:26 vm03 ceph-mon[50983]: pgmap v668: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:28 vm00 ceph-mon[49980]: pgmap v669: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:28 vm03 ceph-mon[50983]: pgmap v669: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:37:30.919 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:37:30.920 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:37:30.945 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:37:30.945 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:37:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:30 vm00 ceph-mon[49980]: pgmap v670: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-10T05:37:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:30 vm03 ceph-mon[50983]: pgmap v670: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:32 vm00 ceph-mon[49980]: pgmap v671: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:32 vm03 ceph-mon[50983]: pgmap v671: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:35 vm00 ceph-mon[49980]: pgmap v672: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:35 vm03 ceph-mon[50983]: pgmap v672: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:35.947 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:37:35.947 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:37:35.974 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:37:35.975 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:37:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:37:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:37:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:37:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:37:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:37 vm00 ceph-mon[49980]: pgmap v673: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:37 vm03 ceph-mon[50983]: pgmap v673: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:39.360 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:39 vm03 ceph-mon[50983]: pgmap v674: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:39 vm00 ceph-mon[49980]: pgmap v674: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:40.976 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:37:40.976 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:37:41.002 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:37:41.003 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: pgmap v675: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:37:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: pgmap v675: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:37:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:37:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:43 vm00 ceph-mon[49980]: pgmap v676: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:43 vm03 ceph-mon[50983]: pgmap v676: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:45 vm00 ceph-mon[49980]: pgmap v677: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:45 vm03 ceph-mon[50983]: pgmap v677: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:46.004 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:37:46.004 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:37:46.031 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:37:46.031 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:37:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:47 vm00 ceph-mon[49980]: pgmap v678: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:47 vm03 ceph-mon[50983]: pgmap v678: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:49.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:49 vm00 ceph-mon[49980]: pgmap v679: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:49.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:49 vm03 ceph-mon[50983]: pgmap v679: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:51.033 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:37:51.033 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:37:51.059 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:37:51.059 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:37:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:51 vm00 ceph-mon[49980]: pgmap v680: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:51 vm03 ceph-mon[50983]: pgmap v680: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:53 vm00 ceph-mon[49980]: pgmap v681: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:53 vm03 ceph-mon[50983]: pgmap v681: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:55 vm00 ceph-mon[49980]: pgmap v682: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:55 vm03 ceph-mon[50983]: pgmap v682: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:37:56.060 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:37:56.061 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:37:56.087 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:37:56.087 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:37:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:57 vm00 ceph-mon[49980]: pgmap v683: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:57 vm03 ceph-mon[50983]: pgmap v683: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:37:59 vm00 ceph-mon[49980]: pgmap v684: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:37:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:37:59 vm03 ceph-mon[50983]: pgmap v684: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:01.089 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:01.090 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:01.118 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:01.118 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:01 vm00 ceph-mon[49980]: pgmap v685: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:01 vm03 ceph-mon[50983]: pgmap v685: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:03 vm00 ceph-mon[49980]: pgmap v686: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:03 vm03 ceph-mon[50983]: pgmap v686: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:04.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:04 vm00 ceph-mon[49980]: pgmap v687: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:04.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:04 vm03 ceph-mon[50983]: pgmap v687: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:06.120 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:06.121 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:06.148 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:06.148 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:06 vm00 ceph-mon[49980]: pgmap v688: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:06 vm03 ceph-mon[50983]: pgmap v688: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:08 vm00 ceph-mon[49980]: pgmap v689: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:08 vm03 ceph-mon[50983]: pgmap v689: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:11.150 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:11.151 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:11.177 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:11.178 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:10 vm00 ceph-mon[49980]: pgmap v690: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:10 vm03 ceph-mon[50983]: pgmap v690: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:12 vm00 ceph-mon[49980]: pgmap v691: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:12 vm03 ceph-mon[50983]: pgmap v691: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:15.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:14 vm00 ceph-mon[49980]: pgmap v692: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:14 vm03 ceph-mon[50983]: pgmap v692: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:16.179 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:16.180 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:16.208 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:16.208 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:16 vm00 ceph-mon[49980]: pgmap v693: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:16 vm03 ceph-mon[50983]: pgmap v693: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:18 vm00 ceph-mon[49980]: pgmap v694: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:18 vm03 ceph-mon[50983]: pgmap v694: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:21.210 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:21.210 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:21.237 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:21.238 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:20 vm00 ceph-mon[49980]: pgmap v695: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:20 vm03 ceph-mon[50983]: pgmap v695: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:22 vm00 ceph-mon[49980]: pgmap v696: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:22 vm03 ceph-mon[50983]: pgmap v696: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:24 vm00 ceph-mon[49980]: pgmap v697: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:24 vm03 ceph-mon[50983]: pgmap v697: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:26.239 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:26.239 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:26.265 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:26.265 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:26 vm00 ceph-mon[49980]: pgmap v698: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:26 vm03 ceph-mon[50983]: pgmap v698: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:28 vm00 ceph-mon[49980]: pgmap v699: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:28 vm03 ceph-mon[50983]: pgmap v699: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:31.267 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:31.267 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:30 vm00 ceph-mon[49980]: pgmap v700: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:31.292 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:31.292 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:30 vm03 ceph-mon[50983]: pgmap v700: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:32 vm00 ceph-mon[49980]: pgmap v701: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:32 vm03 ceph-mon[50983]: pgmap v701: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:34 vm00 ceph-mon[49980]: pgmap v702: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:34 vm03 ceph-mon[50983]: pgmap v702: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:38:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:35 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:38:36.293 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:36.294 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:38:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:35 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:38:36.320 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:36.321 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:37 vm00 ceph-mon[49980]: pgmap v703: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:37 vm03 ceph-mon[50983]: pgmap v703: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:39 vm00 ceph-mon[49980]: pgmap v704: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:39 vm03 ceph-mon[50983]: pgmap v704: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:41.094 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:41 vm00 ceph-mon[49980]: pgmap v705: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:41.094 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:38:41.094 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:38:41.094 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:41 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:38:41.166 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:41 vm03 ceph-mon[50983]: pgmap v705: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:41.166 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:38:41.166 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:38:41.166 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:41 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:38:41.322 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:41.322 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:41.375 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:41.376 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:42 vm00 ceph-mon[49980]: pgmap v706: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:38:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:42 vm03 ceph-mon[50983]: pgmap v706: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:38:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:38:45.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:44 vm00 ceph-mon[49980]: pgmap v707: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:44 vm03 ceph-mon[50983]: pgmap v707: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:46.378 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:46.379 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:46.405 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:46.406 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:46 vm00 ceph-mon[49980]: pgmap v708: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:46 vm03 ceph-mon[50983]: pgmap v708: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:48 vm00 ceph-mon[49980]: pgmap v709: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:48 vm03 ceph-mon[50983]: pgmap v709: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:50 vm00 ceph-mon[49980]: pgmap v710: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:50 vm03 ceph-mon[50983]: pgmap v710: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:51.407 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:51.408 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:51.433 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:51.433 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:52 vm00 ceph-mon[49980]: pgmap v711: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:52 vm03 ceph-mon[50983]: pgmap v711: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:54 vm00 ceph-mon[49980]: pgmap v712: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:54 vm03 ceph-mon[50983]: pgmap v712: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:38:56.435 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:38:56.436 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:38:56.462 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:38:56.462 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:38:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:56 vm00 ceph-mon[49980]: pgmap v713: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:56 vm03 ceph-mon[50983]: pgmap v713: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:38:58 vm00 ceph-mon[49980]: pgmap v714: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:38:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:38:58 vm03 ceph-mon[50983]: pgmap v714: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:00 vm00 ceph-mon[49980]: pgmap v715: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:00 vm03 ceph-mon[50983]: pgmap v715: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:01.464 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:01.464 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:01.490 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:01.491 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:02 vm00 ceph-mon[49980]: pgmap v716: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:02 vm03 ceph-mon[50983]: pgmap v716: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:04 vm00 ceph-mon[49980]: pgmap v717: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:04 vm03 ceph-mon[50983]: pgmap v717: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:06.493 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:06.493 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:06.519 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:06.520 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:06 vm00 ceph-mon[49980]: pgmap v718: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:06 vm03 ceph-mon[50983]: pgmap v718: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:08 vm00 ceph-mon[49980]: pgmap v719: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:08 vm03 ceph-mon[50983]: pgmap v719: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:10 vm00 ceph-mon[49980]: pgmap v720: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:10 vm03 ceph-mon[50983]: pgmap v720: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:11.521 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:11.522 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:11.548 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:11.548 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:13 vm00 ceph-mon[49980]: pgmap v721: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:13 vm03 ceph-mon[50983]: pgmap v721: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:15 vm00 ceph-mon[49980]: pgmap v722: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:15 vm03 ceph-mon[50983]: pgmap v722: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:16.550 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:16.550 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:16.656 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:16.657 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:17 vm00 ceph-mon[49980]: pgmap v723: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:17 vm03 ceph-mon[50983]: pgmap v723: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:19 vm00 ceph-mon[49980]: pgmap v724: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:19 vm03 ceph-mon[50983]: pgmap v724: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:21 vm00 ceph-mon[49980]: pgmap v725: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:21 vm03 ceph-mon[50983]: pgmap v725: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:21.658 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:21.659 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:21.691 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:21.692 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:23 vm00 ceph-mon[49980]: pgmap v726: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:23 vm03 ceph-mon[50983]: pgmap v726: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:25 vm03 ceph-mon[50983]: pgmap v727: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:25 vm00 ceph-mon[49980]: pgmap v727: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:26.693 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:26.694 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:26.720 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:26.721 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:27 vm03 ceph-mon[50983]: pgmap v728: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:27 vm00 ceph-mon[49980]: pgmap v728: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:29.359 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:29 vm03 ceph-mon[50983]: pgmap v729: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:29.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:29 vm00 ceph-mon[49980]: pgmap v729: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:31 vm00 ceph-mon[49980]: pgmap v730: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:31 vm03 ceph-mon[50983]: pgmap v730: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:31.722 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:31.723 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:31.751 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:31.752 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:33 vm00 ceph-mon[49980]: pgmap v731: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:33 vm03 ceph-mon[50983]: pgmap v731: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:35 vm00 ceph-mon[49980]: pgmap v732: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:35 vm03 ceph-mon[50983]: pgmap v732: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:39:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:39:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:39:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:39:36.753 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:36.754 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:36.779 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:36.780 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:37 vm00 ceph-mon[49980]: pgmap v733: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:37 vm03 ceph-mon[50983]: pgmap v733: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:38.734 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:38 vm00 ceph-mon[49980]: pgmap v734: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:38.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:38 vm03 ceph-mon[50983]: pgmap v734: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:40 vm00 ceph-mon[49980]: pgmap v735: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:40 vm03 ceph-mon[50983]: pgmap v735: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:41.782 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:41.782 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:41.810 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:41.810 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:42 vm00 ceph-mon[49980]: pgmap v736: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:39:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:39:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:39:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:39:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:42 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:39:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:42 vm03 ceph-mon[50983]: pgmap v736: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:39:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:39:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:39:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:39:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:42 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:39:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:44 vm00 ceph-mon[49980]: pgmap v737: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:44 vm03 ceph-mon[50983]: pgmap v737: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:46.812 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:46.813 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:46.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:46.841 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:46 vm00 ceph-mon[49980]: pgmap v738: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:46 vm03 ceph-mon[50983]: pgmap v738: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:48 vm00 ceph-mon[49980]: pgmap v739: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:48 vm03 ceph-mon[50983]: pgmap v739: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:50 vm00 ceph-mon[49980]: pgmap v740: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:50 vm03 ceph-mon[50983]: pgmap v740: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:51.843 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:51.843 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:51.871 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:51.871 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:52 vm00 ceph-mon[49980]: pgmap v741: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:52 vm03 ceph-mon[50983]: pgmap v741: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:54 vm00 ceph-mon[49980]: pgmap v742: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:54 vm03 ceph-mon[50983]: pgmap v742: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:39:56.873 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:39:56.873 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:39:56.899 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:39:56.900 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:39:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:57 vm00 ceph-mon[49980]: pgmap v743: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:57 vm03 ceph-mon[50983]: pgmap v743: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:59.359 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:39:59 vm03 ceph-mon[50983]: pgmap v744: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:39:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:39:59 vm00 ceph-mon[49980]: pgmap v744: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:00 vm00 ceph-mon[49980]: pgmap v745: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:00 vm00 ceph-mon[49980]: overall HEALTH_OK
2026-03-10T05:40:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:00 vm03 ceph-mon[50983]: pgmap v745: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:00 vm03 ceph-mon[50983]: overall HEALTH_OK
2026-03-10T05:40:01.902 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:40:01.902 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:40:01.929 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:40:01.930 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:40:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:02 vm00 ceph-mon[49980]: pgmap v746: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:02 vm03 ceph-mon[50983]: pgmap v746: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:04 vm00 ceph-mon[49980]: pgmap v747: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:04 vm03 ceph-mon[50983]: pgmap v747: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:06.932 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:40:06.932 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:40:06.958 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:40:06.958 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:40:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:06 vm00 ceph-mon[49980]: pgmap v748: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:06 vm03 ceph-mon[50983]: pgmap v748: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:08 vm00 ceph-mon[49980]: pgmap v749: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:08 vm03 ceph-mon[50983]: pgmap v749: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:10 vm00 ceph-mon[49980]: pgmap v750: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:10 vm03 ceph-mon[50983]: pgmap v750: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:11.960 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:40:11.960 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:40:11.988 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:40:11.989 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:40:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:12 vm00 ceph-mon[49980]: pgmap v751: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:12 vm03 ceph-mon[50983]: pgmap v751: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:14 vm00 ceph-mon[49980]: pgmap v752: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:14 vm03 ceph-mon[50983]: pgmap v752: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:16.991 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:40:16.992 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:40:17.018 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:40:17.018 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:40:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:16 vm00 ceph-mon[49980]: pgmap v753: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:16 vm03 ceph-mon[50983]: pgmap v753: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:19 vm00 ceph-mon[49980]: pgmap v754: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:19 vm03 ceph-mon[50983]: pgmap v754: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:21 vm00 ceph-mon[49980]: pgmap v755: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:21 vm03 ceph-mon[50983]: pgmap v755: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:22.020 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:40:22.020 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync
2026-03-10T05:40:22.046 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:40:22.046 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:40:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:23 vm00 ceph-mon[49980]: pgmap v756: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:23 vm03 ceph-mon[50983]: pgmap v756: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:40:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:25 vm00 ceph-mon[49980]: pgmap v757: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:25 vm03 ceph-mon[50983]: pgmap v757: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:40:27.048 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:27.048 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:27.075 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:27.076 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:27 vm00 ceph-mon[49980]: pgmap v758: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:27 vm03 ceph-mon[50983]: pgmap v758: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:29 vm00 ceph-mon[49980]: pgmap v759: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:29 vm03 ceph-mon[50983]: pgmap v759: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:31 vm03 ceph-mon[50983]: pgmap v760: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:31 vm00 ceph-mon[49980]: pgmap v760: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:32.077 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:32.077 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:32.104 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:32.105 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:33 vm03 ceph-mon[50983]: pgmap v761: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:33 vm00 ceph-mon[49980]: pgmap v761: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:35 vm03 ceph-mon[50983]: pgmap v762: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:35.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:35 vm00 ceph-mon[49980]: pgmap v762: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:40:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:36 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 
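The `config rm ... mgr/rbd_support/vm00.vnepyw/{mirror_snapshot_schedule,trash_purge_schedule}` dispatches just above recur about once per minute (05:40:36, 05:41:36, 05:42:36) and look like routine rbd_support module housekeeping; they are unrelated to the failing mount. When reading a capture like this, the retry attempts are easier to follow with the mon chatter filtered out, e.g. (filename hypothetical):

    # Isolate the client's mount retry trace from the pgmap/mgr background noise.
    grep -E 'mount -t nfs|mount\.nfs:' teuthology.log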
2026-03-10T05:40:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:40:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:36 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:40:37.106 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:37.106 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:37.132 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:37.132 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:37 vm03 ceph-mon[50983]: pgmap v763: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:37 vm00 ceph-mon[49980]: pgmap v763: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:38.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:38 vm03 ceph-mon[50983]: pgmap v764: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:38.737 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:38 vm00 ceph-mon[49980]: pgmap v764: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:40 vm00 ceph-mon[49980]: pgmap v765: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:40 vm03 ceph-mon[50983]: pgmap v765: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:42.133 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:42.134 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:42.160 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:42.161 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:43.341 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:43 vm03 ceph-mon[50983]: pgmap v766: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:43.341 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:40:43.341 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:40:43.343 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:43 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T05:40:43.618 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:43 vm00 ceph-mon[49980]: pgmap v766: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:43.618 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:40:43.618 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:40:43.618 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:43 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:40:44.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:44 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:40:44.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:44 vm00 ceph-mon[49980]: pgmap v767: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:44.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:44 vm00 ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:40:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:44 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:40:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:44 vm03 ceph-mon[50983]: pgmap v767: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:44 vm03 ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:40:47.162 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:47.163 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:47.189 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:47.189 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:46 vm00 ceph-mon[49980]: pgmap v768: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:46 vm03 ceph-mon[50983]: pgmap v768: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:48 vm00 ceph-mon[49980]: pgmap v769: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:48 vm03 ceph-mon[50983]: pgmap v769: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:50 vm00 ceph-mon[49980]: pgmap v770: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:50 vm03 ceph-mon[50983]: pgmap v770: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:52.190 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:52.191 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:52.217 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:52.218 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:52 vm00 ceph-mon[49980]: pgmap v771: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:52 vm03 ceph-mon[50983]: pgmap v771: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:54 vm00 ceph-mon[49980]: pgmap v772: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:54 vm03 ceph-mon[50983]: pgmap v772: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:40:57.219 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:40:57.220 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:40:57.244 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:40:57.245 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:40:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:56 vm00 ceph-mon[49980]: pgmap v773: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:56 vm03 ceph-mon[50983]: pgmap v773: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:40:59 vm00 ceph-mon[49980]: pgmap v774: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:40:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:40:59 vm03 ceph-mon[50983]: pgmap v774: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:01 vm00 ceph-mon[49980]: pgmap v775: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:01 vm03 ceph-mon[50983]: pgmap v775: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:02.246 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:02.247 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:41:02.274 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:02.274 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:03 vm00 ceph-mon[49980]: pgmap v776: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:03 vm03 ceph-mon[50983]: pgmap v776: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:05 vm00 ceph-mon[49980]: pgmap v777: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:05 vm03 ceph-mon[50983]: pgmap v777: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:07.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:07.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-10T05:41:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:07 vm00 ceph-mon[49980]: pgmap v778: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:07.303 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:07.304 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:07 vm03 ceph-mon[50983]: pgmap v778: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:09 vm00.local ceph-mon[49980]: pgmap v779: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:09 vm03 ceph-mon[50983]: pgmap v779: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:11 vm00.local ceph-mon[49980]: pgmap v780: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:11 vm03 ceph-mon[50983]: pgmap v780: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:12.306 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:12.306 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:12.337 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:12.338 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:13.341 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:13 vm00.local ceph-mon[49980]: pgmap v781: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:13 vm03 ceph-mon[50983]: pgmap v781: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:15.530 
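From 05:41:09 the vm00 journal entries report the host as `vm00.local`, and on the 05:41:12 iteration the traced command changes accordingly from `mount -t nfs vm00:/fake ...` to `mount -t nfs vm00.local:/fake ...`: because the loop re-runs `hostname` on every pass, the mount target silently follows the host's rename mid-run. A variant that pins the target once would behave differently here (a sketch, not the task's actual command):

    # Capture the server name once so the target does not change if the
    # hostname flips (vm00 -> vm00.local) while the loop is still retrying.
    server="$(hostname)"
    while ! mount -t nfs "${server}":/fake /mnt/foo -o sync; do
        sleep 5
    done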
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:15 vm00.local ceph-mon[49980]: pgmap v782: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:15.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:15 vm03 ceph-mon[50983]: pgmap v782: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:17.339 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:17.340 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:17.367 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:17.368 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:17 vm00.local ceph-mon[49980]: pgmap v783: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:17 vm03 ceph-mon[50983]: pgmap v783: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:19 vm00.local ceph-mon[49980]: pgmap v784: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:19 vm03 ceph-mon[50983]: pgmap v784: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:21 vm00.local ceph-mon[49980]: pgmap v785: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:21 vm03 ceph-mon[50983]: pgmap v785: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:22.369 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:22.370 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:22.397 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:22.397 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:22.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:22 vm03 ceph-mon[50983]: pgmap v786: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:22.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:22 vm00.local ceph-mon[49980]: pgmap v786: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:24.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:24 vm00.local ceph-mon[49980]: pgmap v787: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:24 vm03 ceph-mon[50983]: pgmap v787: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:27.399 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:27.399 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:27.454 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:27.454 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:27 vm00.local ceph-mon[49980]: pgmap v788: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:27 vm03 ceph-mon[50983]: pgmap v788: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:28.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:28 vm00.local ceph-mon[49980]: pgmap v789: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:28.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:28 vm03 ceph-mon[50983]: pgmap v789: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:31 vm00.local ceph-mon[49980]: pgmap v790: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:31 vm03 ceph-mon[50983]: pgmap v790: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:32.455 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:32.456 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:32.482 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:32.482 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:33 vm03.local ceph-mon[50983]: pgmap v791: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:33 vm00.local ceph-mon[49980]: pgmap v791: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:35 vm03.local ceph-mon[50983]: pgmap v792: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:35.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:35 vm00.local ceph-mon[49980]: pgmap v792: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:41:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:41:36.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:41:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:41:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:37 vm03.local ceph-mon[50983]: pgmap v793: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:41:37.484 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:37.484 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:37 vm00.local ceph-mon[49980]: pgmap v793: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:41:37.568 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:37.569 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:38.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:38 vm00.local ceph-mon[49980]: pgmap v794: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:41:38.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:38 vm03.local ceph-mon[50983]: pgmap v794: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:41:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:40 vm00.local ceph-mon[49980]: pgmap v795: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:41 vm03.local ceph-mon[50983]: pgmap v795: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:42.570 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:42.571 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:42.595 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:42.596 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:43 vm00.local ceph-mon[49980]: pgmap v796: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:41:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:43 vm03.local ceph-mon[50983]: pgmap v796: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:41:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:41:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:41:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:41:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:41:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:41:44.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:44 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:41:44.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:44 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:41:44.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:44 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:41:44.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:44 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:41:44.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:44 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:41:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:45 vm00.local ceph-mon[49980]: pgmap v797: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:45 vm03.local ceph-mon[50983]: pgmap v797: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:47 vm00.local ceph-mon[49980]: pgmap v798: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:47 vm03.local ceph-mon[50983]: pgmap v798: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:47.597 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:47.598 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:47.625 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:47.625 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:49 vm00.local ceph-mon[49980]: pgmap v799: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:49 vm03.local ceph-mon[50983]: pgmap v799: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 
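The 05:41:44 burst of `config dump`, `config generate-minimal-conf`, and `auth get client.admin` repeats the identical bursts at 05:40:43 and (below) 05:42:44, i.e. on a roughly 60-second cadence, which points to a periodic mgr/orchestrator refresh rather than anything triggered by the mount retries. The cadence can be confirmed directly from the capture (filename hypothetical):

    # Print the journald timestamps of each "generate-minimal-conf" burst.
    grep 'config generate-minimal-conf' teuthology.log | \
        grep -o 'Mar 10 [0-9:]\+' | sort -u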
2026-03-10T05:41:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:51 vm03.local ceph-mon[50983]: pgmap v800: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:51.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:51 vm00.local ceph-mon[49980]: pgmap v800: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:52.627 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:52.628 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:52.653 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:52.654 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:53 vm03.local ceph-mon[50983]: pgmap v801: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:53 vm00.local ceph-mon[49980]: pgmap v801: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:55 vm03.local ceph-mon[50983]: pgmap v802: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:55 vm00.local ceph-mon[49980]: pgmap v802: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:41:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:57 vm03.local ceph-mon[50983]: pgmap v803: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:57 vm00.local ceph-mon[49980]: pgmap v803: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:57.655 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:41:57.656 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:41:57.682 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:41:57.682 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:41:59.359 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:41:59 vm03.local ceph-mon[50983]: pgmap v804: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:41:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:41:59 vm00.local ceph-mon[49980]: pgmap v804: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:01.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:01 vm00.local ceph-mon[49980]: pgmap v805: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:01 vm03.local ceph-mon[50983]: pgmap v805: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:02.683 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:02.684 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:02.709 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:02.710 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:03 vm00.local ceph-mon[49980]: pgmap v806: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:03 vm03.local ceph-mon[50983]: pgmap v806: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:05 vm00.local ceph-mon[49980]: pgmap v807: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:05 vm03.local ceph-mon[50983]: pgmap v807: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:07 vm00.local ceph-mon[49980]: pgmap v808: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:07 vm03.local ceph-mon[50983]: pgmap v808: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:07.711 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:07.712 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:07.738 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:07.738 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:08.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:08 vm00.local ceph-mon[49980]: pgmap v809: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:08 vm03.local ceph-mon[50983]: pgmap v809: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:11 vm00.local ceph-mon[49980]: pgmap v810: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:11 vm03.local ceph-mon[50983]: pgmap v810: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:12.740 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:12.740 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:12.768 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:12.769 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:13 vm00.local ceph-mon[49980]: pgmap v811: 97 pgs: 97 active+clean; 453 KiB data, 66 
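Note the raw-usage step in the pgmap records here: `58 MiB used` through v804, `62 MiB used` at v805 (05:42:01), then `66 MiB used` from v808 (05:42:07), while the data figure stays at 453 KiB; background metadata writes continue even though the client still cannot mount. The series is easy to pull out of the capture (filename hypothetical):

    # Show the distinct "used" values and how many pgmap records carry each.
    grep -o '[0-9]\+ MiB used' teuthology.log | uniq -c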
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:13 vm03.local ceph-mon[50983]: pgmap v811: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:15 vm00.local ceph-mon[49980]: pgmap v812: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:15 vm03.local ceph-mon[50983]: pgmap v812: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:17 vm00.local ceph-mon[49980]: pgmap v813: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:17 vm03.local ceph-mon[50983]: pgmap v813: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:17.770 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:17.771 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:17.807 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:17.807 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:19 vm00.local ceph-mon[49980]: pgmap v814: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:19 vm03.local ceph-mon[50983]: pgmap v814: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:21 vm00.local ceph-mon[49980]: pgmap v815: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:21 vm03.local ceph-mon[50983]: pgmap v815: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:22.809 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:22.809 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:22.834 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:22.834 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:23.472 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:23 vm00.local ceph-mon[49980]: pgmap v816: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:23 vm03.local ceph-mon[50983]: pgmap v816: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:25 vm00.local ceph-mon[49980]: pgmap v817: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 
B/s wr, 0 op/s 2026-03-10T05:42:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:25 vm03.local ceph-mon[50983]: pgmap v817: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:27 vm00.local ceph-mon[49980]: pgmap v818: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:27 vm03.local ceph-mon[50983]: pgmap v818: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:27.836 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:27.836 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:27.862 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:27.862 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:29.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:29 vm00.local ceph-mon[49980]: pgmap v819: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:29.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:29 vm03.local ceph-mon[50983]: pgmap v819: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:31 vm00.local ceph-mon[49980]: pgmap v820: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:31 vm03.local ceph-mon[50983]: pgmap v820: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:32.863 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:32.864 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:32.889 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:32.890 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:33 vm00.local ceph-mon[49980]: pgmap v821: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:33 vm03.local ceph-mon[50983]: pgmap v821: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:35 vm00.local ceph-mon[49980]: pgmap v822: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:35 vm03.local ceph-mon[50983]: pgmap v822: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:42:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:42:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:42:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:42:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:37 vm03.local ceph-mon[50983]: pgmap v823: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:37 vm00.local ceph-mon[49980]: pgmap v823: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:37.891 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:37.892 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:37.916 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:37.917 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:39 vm03.local ceph-mon[50983]: pgmap v824: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:39 vm00.local ceph-mon[49980]: pgmap v824: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:41 vm03.local ceph-mon[50983]: pgmap v825: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:41 vm00.local ceph-mon[49980]: pgmap v825: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:42:42.919 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:42:42.919 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:42:42.944 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:42:42.944 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:42:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:42:43 vm03.local ceph-mon[50983]: pgmap v826: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:42:43.333 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:43 vm00.local ceph-mon[49980]: pgmap v826: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T05:42:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:42:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:42:44.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:44 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same three dispatches are mirrored by ceph-mon[50983] on vm03.local at 05:42:44 ...]
2026-03-10T05:42:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:42:45 vm00.local ceph-mon[49980]: pgmap v827: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:42:47.946 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:42:47.946 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:42:47.972 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:42:47.973 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[... the mount is retried every 5 s (05:42:52, 05:42:57, 05:43:03, 05:43:08, 05:43:13, 05:43:18, 05:43:23, 05:43:28, 05:43:33), failing each time with "mount.nfs: mount system call failed"; meanwhile both mons report pgmap v828-v852 unchanged: 97 pgs active+clean, 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail ...]
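The xtrace output above (++ hostname, + mount -t nfs ..., + sleep 5) is consistent with a shell retry loop of roughly the following shape; this is a reconstruction from the trace, not the verbatim task script:

    # Reconstructed from the xtrace lines above; host and paths as they appear in the log.
    # Keep retrying the NFS mount until the export becomes mountable.
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        sleep 5
    done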
2026-03-10T05:43:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:43:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:43:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:43:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[... the same two "config rm" dispatches are mirrored by ceph-mon[49980] on vm00.local at 05:43:36; the mount retry at 05:43:38 fails again; pgmap v853-v856 unchanged ...]
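These two config rm dispatches recur once per minute (again at 05:44:36 and 05:45:36 below) and appear to be the mgr's rbd_support module clearing its per-instance mirror_snapshot_schedule and trash_purge_schedule keys. A quick spot check that no such keys linger, sketched with the same "ceph config dump" command the mgr itself dispatches above:

    # Hypothetical spot check, not part of this run.
    ceph config dump | grep 'mgr/rbd_support/' || echo "no rbd_support schedule keys set"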
2026-03-10T05:43:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:43:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:43:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:43:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:43:45.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:43:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... mirrored by ceph-mon[50983] on vm03.local at 05:43:45; pgmap v857-v858 unchanged; the mount retry at 05:43:48 fails again ...]
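The config dump / config generate-minimal-conf / auth get client.admin trio fires on a one-minute cadence in this log (05:42:44, 05:43:45, 05:44:45, 05:45:45), consistent with a periodic mgr pass that refreshes the minimal ceph.conf and admin keyring distributed to hosts. Run by hand, the two read-only commands look like this (illustrative; exact output varies by release):

    ceph config generate-minimal-conf   # prints a minimal [global] section: fsid and mon_host
    ceph auth get client.admin          # prints the client.admin keyring entry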
2026-03-10T05:43:53.443 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:43:53.444 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:43:53.469 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:43:53.469 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[... identical retries every 5 s through 05:44:33, each failing with "mount.nfs: mount system call failed"; both mons report pgmap v859-v881 unchanged: 97 pgs active+clean, 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail ...]
2026-03-10T05:44:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:44:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:44:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:44:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[... mirrored by ceph-mon[50983] on vm03.local at 05:44:36; pgmap v882-v886 unchanged; the mount retry at 05:44:38 fails again ...]
2026-03-10T05:44:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:44:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:44:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:44:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:44:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:44:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... mirrored by ceph-mon[50983] on vm03.local at 05:44:45; the mount retry at 05:44:43 fails again; pgmap v887-v888 unchanged ...]
2026-03-10T05:44:48.747 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:44:48.747 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:44:48.777 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:44:48.778 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[... identical retries every 5 s through 05:45:39; pgmap v889-v914 from both mons unchanged: 97 pgs active+clean ...]
2026-03-10T05:45:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:45:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:45:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:45:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[... mirrored by ceph-mon[50983] on vm03.local at 05:45:36 ...]
2026-03-10T05:45:45.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:45:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:45:45.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:45:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:45:45.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:45:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... mirrored by ceph-mon[50983] on vm03.local at 05:45:45; mount retries keep failing every 5 s (05:45:44, 05:45:49, 05:45:54, 05:45:59, 05:46:04, 05:46:09); pgmap v915-v928 from both mons unchanged: 97 pgs active+clean, 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail ...]
2026-03-10T05:46:09.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:09 vm03.local ceph-mon[50983]: pgmap v929: 97 pgs: 97 active+clean; 453 KiB data, 66
MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:09 vm00.local ceph-mon[49980]: pgmap v929: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:11 vm00.local ceph-mon[49980]: pgmap v930: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:11 vm03.local ceph-mon[50983]: pgmap v930: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:13 vm00.local ceph-mon[49980]: pgmap v931: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:13 vm03.local ceph-mon[50983]: pgmap v931: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:14.235 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:14.236 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:14.261 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:14.261 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:15 vm00.local ceph-mon[49980]: pgmap v932: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:15 vm03.local ceph-mon[50983]: pgmap v932: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:17 vm00.local ceph-mon[49980]: pgmap v933: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:17 vm03.local ceph-mon[50983]: pgmap v933: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:19.262 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:19.263 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:19.288 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:19.288 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:19 vm00.local ceph-mon[49980]: pgmap v934: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:19 vm03.local ceph-mon[50983]: pgmap v934: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:21 vm00.local ceph-mon[49980]: pgmap v935: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 
B/s wr, 0 op/s 2026-03-10T05:46:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:21 vm03.local ceph-mon[50983]: pgmap v935: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:23 vm00.local ceph-mon[49980]: pgmap v936: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:23 vm03.local ceph-mon[50983]: pgmap v936: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:24.289 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:24.290 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:24.314 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:24.315 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:25 vm00.local ceph-mon[49980]: pgmap v937: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:25 vm03.local ceph-mon[50983]: pgmap v937: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:27 vm00.local ceph-mon[49980]: pgmap v938: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:27 vm03.local ceph-mon[50983]: pgmap v938: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:29.316 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:29.316 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:29.341 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:29.341 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:29.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:29 vm00.local ceph-mon[49980]: pgmap v939: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:29.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:29 vm03.local ceph-mon[50983]: pgmap v939: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:31 vm00.local ceph-mon[49980]: pgmap v940: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:31 vm03.local ceph-mon[50983]: pgmap v940: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:33 vm00.local ceph-mon[49980]: pgmap v941: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:33.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:33 vm03.local ceph-mon[50983]: pgmap v941: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:34.342 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:34.343 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:34.369 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:34.369 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:35 vm00.local ceph-mon[49980]: pgmap v942: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:35 vm03.local ceph-mon[50983]: pgmap v942: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:46:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:46:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:46:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:46:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:37 vm00.local ceph-mon[49980]: pgmap v943: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:37 vm03.local ceph-mon[50983]: pgmap v943: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:39.370 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:39.371 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:39.395 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:39.396 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:39 vm00.local ceph-mon[49980]: pgmap v944: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:39 vm03.local ceph-mon[50983]: pgmap v944: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:41.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:41 vm00.local ceph-mon[49980]: pgmap v945: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:41 vm03.local ceph-mon[50983]: pgmap v945: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:43 vm00.local ceph-mon[49980]: pgmap v946: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:43 vm03.local ceph-mon[50983]: pgmap v946: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:44.397 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:44.397 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:44.423 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:44.423 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:45.488 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:45 vm03.local ceph-mon[50983]: pgmap v947: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:45.488 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:45 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:46:45.488 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:45 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:46:45.488 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:45 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:46:45.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:45 vm00.local ceph-mon[49980]: pgmap v947: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:45.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:46:45.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:46:45.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:45 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:46:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:46 vm00.local ceph-mon[49980]: pgmap v948: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:46 vm03.local ceph-mon[50983]: pgmap v948: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:46:48.765 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:48 vm00.local ceph-mon[49980]: pgmap v949: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:48.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:48 vm03.local ceph-mon[50983]: pgmap v949: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:49.425 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:49.425 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:49.451 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:49.452 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:51 vm00.local ceph-mon[49980]: pgmap v950: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:51 vm03.local ceph-mon[50983]: pgmap v950: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:53 vm00.local ceph-mon[49980]: pgmap v951: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:53 vm03.local ceph-mon[50983]: pgmap v951: 97 pgs: 97 
active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:54.453 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:54.454 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:54.479 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:54.479 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:55 vm00.local ceph-mon[49980]: pgmap v952: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:55 vm03.local ceph-mon[50983]: pgmap v952: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:46:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:57 vm00.local ceph-mon[49980]: pgmap v953: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:57 vm03.local ceph-mon[50983]: pgmap v953: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:59.481 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:46:59.481 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:46:59.506 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:46:59.506 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:46:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:46:59 vm00.local ceph-mon[49980]: pgmap v954: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:46:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:46:59 vm03.local ceph-mon[50983]: pgmap v954: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:01 vm00.local ceph-mon[49980]: pgmap v955: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:01 vm03.local ceph-mon[50983]: pgmap v955: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:03 vm00.local ceph-mon[49980]: pgmap v956: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:03 vm03.local ceph-mon[50983]: pgmap v956: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:04.507 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:04.508 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:04.534 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:04.534 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:05.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:05 vm00.local ceph-mon[49980]: pgmap v957: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:05 vm03.local ceph-mon[50983]: pgmap v957: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:07 vm00.local ceph-mon[49980]: pgmap v958: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:07 vm03.local ceph-mon[50983]: pgmap v958: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:09 vm00.local ceph-mon[49980]: pgmap v959: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:09.535 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:09.536 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:09 vm03.local ceph-mon[50983]: pgmap v959: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:09.563 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:09.564 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:11 vm00.local ceph-mon[49980]: pgmap v960: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:11 vm03.local ceph-mon[50983]: pgmap v960: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:13 vm00.local ceph-mon[49980]: pgmap v961: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:13 vm03.local ceph-mon[50983]: pgmap v961: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:14.565 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:14.566 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:14.592 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:14.593 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:15 vm00.local ceph-mon[49980]: pgmap v962: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:15 vm03.local ceph-mon[50983]: pgmap v962: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 
05:47:17 vm00.local ceph-mon[49980]: pgmap v963: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:17 vm03.local ceph-mon[50983]: pgmap v963: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:19 vm00.local ceph-mon[49980]: pgmap v964: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:19 vm03.local ceph-mon[50983]: pgmap v964: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:19.594 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:19.594 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:19.620 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:19.621 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:21 vm00.local ceph-mon[49980]: pgmap v965: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:21 vm03.local ceph-mon[50983]: pgmap v965: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:23 vm00.local ceph-mon[49980]: pgmap v966: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:23 vm03.local ceph-mon[50983]: pgmap v966: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:24.622 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:24.623 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:24.751 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:24.751 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:25 vm00.local ceph-mon[49980]: pgmap v967: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:25 vm03.local ceph-mon[50983]: pgmap v967: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:27 vm00.local ceph-mon[49980]: pgmap v968: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:27 vm03.local ceph-mon[50983]: pgmap v968: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:29.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:29 vm00.local ceph-mon[49980]: pgmap v969: 97 
pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:29.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:29 vm03.local ceph-mon[50983]: pgmap v969: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:29.753 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:29.753 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:29.778 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:29.779 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:31 vm00.local ceph-mon[49980]: pgmap v970: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:31 vm03.local ceph-mon[50983]: pgmap v970: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:33.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:33 vm00.local ceph-mon[49980]: pgmap v971: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:33 vm03.local ceph-mon[50983]: pgmap v971: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:34.781 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:34.781 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:34.808 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:34.808 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:35 vm00.local ceph-mon[49980]: pgmap v972: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:35 vm03.local ceph-mon[50983]: pgmap v972: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:47:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:47:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:47:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:47:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:37 vm00.local ceph-mon[49980]: pgmap v973: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:37 vm03.local ceph-mon[50983]: pgmap v973: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:39 vm00.local ceph-mon[49980]: pgmap v974: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:39 vm03.local ceph-mon[50983]: pgmap v974: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:39.809 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:39.810 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:39.836 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:39.836 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:41 vm00.local ceph-mon[49980]: pgmap v975: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:41 vm03.local ceph-mon[50983]: pgmap v975: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:43.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:43 vm00.local ceph-mon[49980]: pgmap v976: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:43 vm03.local ceph-mon[50983]: pgmap v976: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:44.837 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:44.838 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:44.865 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:44.865 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:45 vm00.local ceph-mon[49980]: pgmap v977: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:45 vm03.local ceph-mon[50983]: pgmap v977: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:46.239 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:47:46.239 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:47:46.239 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:47:46.240 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:46 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:46.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:47:46.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:47:46.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:47:46.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:46 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:47 vm00.local ceph-mon[49980]: pgmap v978: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:47 vm03.local ceph-mon[50983]: pgmap v978: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:48.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:48 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:48.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:48 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:47:48.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:48 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:48.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:48 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:48.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:48 vm00.local ceph-mon[49980]: pgmap v979: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:48.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:48 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:48.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:48 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:47:48.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:48 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 
2026-03-10T05:47:48.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:48 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:47:48.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:48 vm03.local ceph-mon[50983]: pgmap v979: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:49.867 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:49.868 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:49.896 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:49.897 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:51 vm00.local ceph-mon[49980]: pgmap v980: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:51 vm03.local ceph-mon[50983]: pgmap v980: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:53 vm00.local ceph-mon[49980]: pgmap v981: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:53 vm03.local ceph-mon[50983]: pgmap v981: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:54.899 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:54.899 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:54.926 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:54.926 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:47:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:55 vm00.local ceph-mon[49980]: pgmap v982: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:55 vm03.local ceph-mon[50983]: pgmap v982: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:57 vm00.local ceph-mon[49980]: pgmap v983: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:57 vm03.local ceph-mon[50983]: pgmap v983: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:47:58.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:47:58 vm03.local ceph-mon[50983]: pgmap v984: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:58.714 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:47:58 vm00.local ceph-mon[49980]: pgmap v984: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:47:59.927 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:47:59.928 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:47:59.954 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:47:59.955 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:48:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:01 vm00.local ceph-mon[49980]: pgmap v985: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:48:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:01 vm03.local ceph-mon[50983]: pgmap v985: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:48:03.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:03 vm00.local ceph-mon[49980]: pgmap v986: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:48:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:03 vm03.local ceph-mon[50983]: pgmap v986: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:48:04.957 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:48:04.958 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:48:04.984 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:48:04.984 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:48:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:05 vm00.local ceph-mon[49980]: pgmap v987: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:48:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:05 vm03.local ceph-mon[50983]: pgmap v987: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:48:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:07 vm00.local ceph-mon[49980]: pgmap v988: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:48:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:07 vm03.local ceph-mon[50983]: pgmap v988: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:48:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:09 vm00.local ceph-mon[49980]: pgmap v989: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:48:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:09 vm03.local ceph-mon[50983]: pgmap v989: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:48:09.986 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:48:09.986 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:48:10.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:48:10.012 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:48:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:11 vm00.local ceph-mon[49980]: pgmap v990: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T05:48:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:11 vm03.local ceph-mon[50983]: pgmap v990: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:13.339 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:13 vm00.local ceph-mon[49980]: pgmap v991: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:13 vm03.local ceph-mon[50983]: pgmap v991: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:15.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:15.014 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:15.041 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:15.041 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:15 vm00.local ceph-mon[49980]: pgmap v992: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:15 vm03.local ceph-mon[50983]: pgmap v992: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:17 vm00.local ceph-mon[49980]: pgmap v993: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:17 vm03.local ceph-mon[50983]: pgmap v993: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:19 vm00.local ceph-mon[49980]: pgmap v994: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:19 vm03.local ceph-mon[50983]: pgmap v994: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:20.043 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:20.044 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:20.072 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:20.073 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:21 vm00.local ceph-mon[49980]: pgmap v995: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:21 vm03.local ceph-mon[50983]: pgmap v995: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:23.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:23 vm00.local ceph-mon[49980]: pgmap v996: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:23 vm03.local ceph-mon[50983]: pgmap v996: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:25.074 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:25.075 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:25.100 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:25.100 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:25 vm00.local ceph-mon[49980]: pgmap v997: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:25 vm03.local ceph-mon[50983]: pgmap v997: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:27 vm00.local ceph-mon[49980]: pgmap v998: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:27 vm03.local ceph-mon[50983]: pgmap v998: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:28.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:28 vm00.local ceph-mon[49980]: pgmap v999: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:28.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:28 vm03.local ceph-mon[50983]: pgmap v999: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:30.102 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:30.102 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:30.190 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:30.190 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:31 vm00.local ceph-mon[49980]: pgmap v1000: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:31 vm03.local ceph-mon[50983]: pgmap v1000: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:33 vm00.local ceph-mon[49980]: pgmap v1001: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:33 vm03.local ceph-mon[50983]: pgmap v1001: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:35.192 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:35.192 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:35.227 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:35.227 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:35 vm00.local ceph-mon[49980]: pgmap v1002: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:35 vm03.local ceph-mon[50983]: pgmap v1002: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:48:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:48:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:48:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:48:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:37 vm00.local ceph-mon[49980]: pgmap v1003: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:37 vm03.local ceph-mon[50983]: pgmap v1003: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:39 vm00.local ceph-mon[49980]: pgmap v1004: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:39 vm03.local ceph-mon[50983]: pgmap v1004: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:40.229 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:40.229 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:40.254 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:40.254 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:41 vm00.local ceph-mon[49980]: pgmap v1005: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:41 vm03.local ceph-mon[50983]: pgmap v1005: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:43 vm00.local ceph-mon[49980]: pgmap v1006: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:43 vm03.local ceph-mon[50983]: pgmap v1006: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:45.256 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:45.256 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:45.284 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:45.284 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:45 vm00.local ceph-mon[49980]: pgmap v1007: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:45 vm03.local ceph-mon[50983]: pgmap v1007: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:46 vm00.local ceph-mon[49980]: pgmap v1008: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:46 vm03.local ceph-mon[50983]: pgmap v1008: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:47 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:48:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:47 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:48:47.557 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:47 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:48:47.612 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:47 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:48:47.612 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:47 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:48:47.612 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:47 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:48:48.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:48 vm03.local ceph-mon[50983]: pgmap v1009: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:48.510 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:48 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:48.691 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:48 vm00.local ceph-mon[49980]: pgmap v1009: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:48.691 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:48 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:49 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:49 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:50.287 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:50.287 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:50.315 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:50.316 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:50 vm03.local ceph-mon[50983]: pgmap v1010: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:48:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:50 vm00.local ceph-mon[49980]: pgmap v1010: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T05:48:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:48:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:52 vm03.local ceph-mon[50983]: pgmap v1011: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:53.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:52 vm00.local ceph-mon[49980]: pgmap v1011: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:54 vm03.local ceph-mon[50983]: pgmap v1012: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:54 vm00.local ceph-mon[49980]: pgmap v1012: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:55.317 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:48:55.318 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:48:55.344 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:48:55.345 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:48:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:57 vm00.local ceph-mon[49980]: pgmap v1013: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:57 vm03.local ceph-mon[50983]: pgmap v1013: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:48:58.778 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:48:58 vm00.local ceph-mon[49980]: pgmap v1014: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:48:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:48:58 vm03.local ceph-mon[50983]: pgmap v1014: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:00.346 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:00.347 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:00.387 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:00.388 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:01.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:01 vm00.local ceph-mon[49980]: pgmap v1015: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:01 vm03.local ceph-mon[50983]: pgmap v1015: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:03.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:03 vm00.local ceph-mon[49980]: pgmap v1016: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:03 vm03.local ceph-mon[50983]: pgmap v1016: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:05.390 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:05.390 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:05.416 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:05.416 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:05 vm00.local ceph-mon[49980]: pgmap v1017: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:05 vm03.local ceph-mon[50983]: pgmap v1017: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:07 vm00.local ceph-mon[49980]: pgmap v1018: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:07 vm03.local ceph-mon[50983]: pgmap v1018: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:09 vm00.local ceph-mon[49980]: pgmap v1019: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:09 vm03.local ceph-mon[50983]: pgmap v1019: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:10.418 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:10.418 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:10.448 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:10.449 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:11 vm00.local ceph-mon[49980]: pgmap v1020: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:11 vm03.local ceph-mon[50983]: pgmap v1020: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:13.345 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:13 vm00.local ceph-mon[49980]: pgmap v1021: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:13 vm03.local ceph-mon[50983]: pgmap v1021: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:15.450 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:15.451 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:15.478 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:15.478 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:15 vm00.local ceph-mon[49980]: pgmap v1022: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:15 vm03.local ceph-mon[50983]: pgmap v1022: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:17 vm00.local ceph-mon[49980]: pgmap v1023: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:17 vm03.local ceph-mon[50983]: pgmap v1023: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:19 vm00.local ceph-mon[49980]: pgmap v1024: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:19 vm03.local ceph-mon[50983]: pgmap v1024: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:20.480 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:20.480 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:20.507 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:20.507 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:21 vm00.local ceph-mon[49980]: pgmap v1025: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:21 vm03.local ceph-mon[50983]: pgmap v1025: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:23 vm00.local ceph-mon[49980]: pgmap v1026: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:23 vm03.local ceph-mon[50983]: pgmap v1026: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:25.509 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:25.509 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:25 vm00.local ceph-mon[49980]: pgmap v1027: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:25.535 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:25.536 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:25 vm03.local ceph-mon[50983]: pgmap v1027: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:26.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:26 vm03.local ceph-mon[50983]: pgmap v1028: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:26.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:26 vm00.local ceph-mon[49980]: pgmap v1028: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:28 vm00.local ceph-mon[49980]: pgmap v1029: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:28 vm03.local ceph-mon[50983]: pgmap v1029: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:30.537 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:30.538 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:30.564 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:30.564 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:31 vm00.local ceph-mon[49980]: pgmap v1030: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:31 vm03.local ceph-mon[50983]: pgmap v1030: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:33 vm00.local ceph-mon[49980]: pgmap v1031: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:33 vm03.local ceph-mon[50983]: pgmap v1031: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:35 vm00.local ceph-mon[49980]: pgmap v1032: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:35 vm03.local ceph-mon[50983]: pgmap v1032: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:35.565 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:35.566 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:35.597 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:35.598 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:49:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:49:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:49:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:49:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:37 vm00.local ceph-mon[49980]: pgmap v1033: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:37 vm03.local ceph-mon[50983]: pgmap v1033: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:39.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:39 vm00.local ceph-mon[49980]: pgmap v1034: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:39 vm03.local ceph-mon[50983]: pgmap v1034: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:40.599 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:40.599 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:40.626 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:40.626 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:41 vm03.local ceph-mon[50983]: pgmap v1035: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:41 vm00.local ceph-mon[49980]: pgmap v1035: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:42.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:42 vm00.local ceph-mon[49980]: pgmap v1036: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:42.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:42 vm03.local ceph-mon[50983]: pgmap v1036: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:44 vm03.local ceph-mon[50983]: pgmap v1037: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:44 vm00.local ceph-mon[49980]: pgmap v1037: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:45.628 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:45.628 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:45.684 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:45.685 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:47 vm00.local ceph-mon[49980]: pgmap v1038: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:47 vm03.local ceph-mon[50983]: pgmap v1038: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:49.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:49 vm00.local ceph-mon[49980]: pgmap v1039: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:49.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:49 vm03.local ceph-mon[50983]: pgmap v1039: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:50.231 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:49:50.231 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:49:50.231 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:49:50.231 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:49:50.231 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:49:50.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:49:50.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:49:50.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:49:50.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:49:50.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:49:50.687 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:50.687 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:50.716 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:50.716 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:51 vm00.local ceph-mon[49980]: pgmap v1040: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:51 vm03.local ceph-mon[50983]: pgmap v1040: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:53 vm00.local ceph-mon[49980]: pgmap v1041: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:53 vm03.local ceph-mon[50983]: pgmap v1041: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:55 vm00.local ceph-mon[49980]: pgmap v1042: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:55 vm03.local ceph-mon[50983]: pgmap v1042: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:55.718 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:49:55.718 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:49:55.745 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:49:55.745 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:49:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:57 vm00.local ceph-mon[49980]: pgmap v1043: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:57 vm03.local ceph-mon[50983]: pgmap v1043: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:49:59.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:49:59 vm00.local ceph-mon[49980]: pgmap v1044: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:49:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:49:59 vm03.local ceph-mon[50983]: pgmap v1044: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:00.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:00 vm00.local ceph-mon[49980]: pgmap v1045: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:00.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:00 vm00.local ceph-mon[49980]: overall HEALTH_OK
2026-03-10T05:50:00.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:00 vm03.local ceph-mon[50983]: pgmap v1045: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:00.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:00 vm03.local ceph-mon[50983]: overall HEALTH_OK
2026-03-10T05:50:00.746 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:00.747 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:00.772 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:00.773 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:02.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:02 vm03.local ceph-mon[50983]: pgmap v1046: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:02 vm00.local ceph-mon[49980]: pgmap v1046: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:04 vm00.local ceph-mon[49980]: pgmap v1047: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:04 vm03.local ceph-mon[50983]: pgmap v1047: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:05.774 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:05.775 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:05.800 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:05.801 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:07 vm00.local ceph-mon[49980]: pgmap v1048: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:07 vm03.local ceph-mon[50983]: pgmap v1048: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:09.505 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:09 vm03.local ceph-mon[50983]: pgmap v1049: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:09.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:09 vm00.local ceph-mon[49980]: pgmap v1049: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:10.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:10 vm03.local ceph-mon[50983]: pgmap v1050: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:10.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:10 vm00.local ceph-mon[49980]: pgmap v1050: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:10.802 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:10.803 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:10.828 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:10.828 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:12 vm00.local ceph-mon[49980]: pgmap v1051: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:12 vm03.local ceph-mon[50983]: pgmap v1051: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:14 vm00.local ceph-mon[49980]: pgmap v1052: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:14 vm03.local ceph-mon[50983]: pgmap v1052: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:15.829 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:15.830 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:15.980 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:15.981 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:16.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:16 vm00.local ceph-mon[49980]: pgmap v1053: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:16 vm03.local ceph-mon[50983]: pgmap v1053: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:18.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:18 vm00.local ceph-mon[49980]: pgmap v1054: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:18.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:18 vm03.local ceph-mon[50983]: pgmap v1054: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:20.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:20 vm00.local ceph-mon[49980]: pgmap v1055: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:20.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:20 vm03.local ceph-mon[50983]: pgmap v1055: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:20.982 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:20.983 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:21.008 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:21.009 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:22 vm03.local ceph-mon[50983]: pgmap v1056: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:22 vm00.local ceph-mon[49980]: pgmap v1056: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:24 vm03.local ceph-mon[50983]: pgmap v1057: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:24 vm00.local ceph-mon[49980]: pgmap v1057: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:26.011 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:26.011 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:26.037 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:26.037 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:26.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:26 vm00.local ceph-mon[49980]: pgmap v1058: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:26.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:26 vm03.local ceph-mon[50983]: pgmap v1058: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:28 vm00.local ceph-mon[49980]: pgmap v1059: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:28 vm03.local ceph-mon[50983]: pgmap v1059: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:30 vm00.local ceph-mon[49980]: pgmap v1060: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:31.038 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:31.039 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:30 vm03.local ceph-mon[50983]: pgmap v1060: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:31.064 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:31.065 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:32 vm00.local ceph-mon[49980]: pgmap v1061: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:32 vm03.local ceph-mon[50983]: pgmap v1061: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:34 vm00.local ceph-mon[49980]: pgmap v1062: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:34 vm03.local ceph-mon[50983]: pgmap v1062: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:50:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:50:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:50:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:50:36.066 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:36.066 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:36.092 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:36.092 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:36 vm00.local ceph-mon[49980]: pgmap v1063: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:36 vm03.local ceph-mon[50983]: pgmap v1063: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:38 vm00.local ceph-mon[49980]: pgmap v1064: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:38 vm03.local ceph-mon[50983]: pgmap v1064: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:40 vm00.local ceph-mon[49980]: pgmap v1065: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:40 vm03.local ceph-mon[50983]: pgmap v1065: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:41.094 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:41.094 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:41.120 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:41.121 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:42 vm00.local ceph-mon[49980]: pgmap v1066: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:42 vm03.local ceph-mon[50983]: pgmap v1066: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:44 vm00.local ceph-mon[49980]: pgmap v1067: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:44 vm03.local ceph-mon[50983]: pgmap v1067: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:46.122 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:46.123 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:46.148 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:46.149 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:46.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:46 vm00.local ceph-mon[49980]: pgmap v1068: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:46.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:46 vm03.local ceph-mon[50983]: pgmap v1068: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:48 vm00.local ceph-mon[49980]: pgmap v1069: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:48 vm03.local ceph-mon[50983]: pgmap v1069: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:50 vm00.local ceph-mon[49980]: pgmap v1070: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:50:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:50:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:50:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:50:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:50:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:50 vm03.local ceph-mon[50983]: pgmap v1070: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:50:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:50:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:50:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:50:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:50:51.150 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:51.150 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:51.177 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:51.177 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:52 vm00.local ceph-mon[49980]: pgmap v1071: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:52 vm03.local ceph-mon[50983]: pgmap v1071: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:54 vm00.local ceph-mon[49980]: pgmap v1072: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:54 vm03.local ceph-mon[50983]: pgmap v1072: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:56.178 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:50:56.179 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:50:56.205 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:50:56.205 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:50:56.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:56 vm00.local ceph-mon[49980]: pgmap v1073: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:56 vm03.local ceph-mon[50983]: pgmap v1073: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:50:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:50:58 vm00.local ceph-mon[49980]: pgmap v1074: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:50:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:50:58 vm03.local ceph-mon[50983]: pgmap v1074: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:00 vm00.local ceph-mon[49980]: pgmap v1075: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:00 vm03.local ceph-mon[50983]: pgmap v1075: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:01.206 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:01.207 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:01.232 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:01.232 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:02 vm00.local ceph-mon[49980]: pgmap v1076: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:02 vm03.local ceph-mon[50983]: pgmap v1076: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:04 vm00.local ceph-mon[49980]: pgmap v1077: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:04 vm03.local ceph-mon[50983]: pgmap v1077: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:06.233 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:06.234 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:06.318 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:06.318 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:06 vm00.local ceph-mon[49980]: pgmap v1078: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:06 vm03.local ceph-mon[50983]: pgmap v1078: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:08 vm00.local ceph-mon[49980]: pgmap v1079: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:08 vm03.local ceph-mon[50983]: pgmap v1079: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:10 vm00.local ceph-mon[49980]: pgmap v1080: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:10 vm03.local ceph-mon[50983]: pgmap v1080: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:11.320 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:11.320 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:11.347 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:11.347 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:12 vm00.local ceph-mon[49980]: pgmap v1081: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:12 vm03.local ceph-mon[50983]: pgmap v1081: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:14 vm00.local ceph-mon[49980]: pgmap v1082: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:14 vm03.local ceph-mon[50983]: pgmap v1082: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:16.349 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:16.350 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:16.376 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:16.376 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:16 vm00.local ceph-mon[49980]: pgmap v1083: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:16 vm03.local ceph-mon[50983]: pgmap v1083: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:18 vm00.local ceph-mon[49980]: pgmap v1084: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:18 vm03.local ceph-mon[50983]: pgmap v1084: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:20 vm00.local ceph-mon[49980]: pgmap v1085: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:20 vm03.local ceph-mon[50983]: pgmap v1085: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:21.377 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:21.378 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:21.404 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:21.405 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:22 vm00.local ceph-mon[49980]: pgmap v1086: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:22 vm03.local ceph-mon[50983]: pgmap v1086: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:24 vm00.local ceph-mon[49980]: pgmap v1087: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:24 vm03.local ceph-mon[50983]: pgmap v1087: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:26.406 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:26.407 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:26.431 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:26.432 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:26.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:26 vm03.local ceph-mon[50983]: pgmap v1088: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:26.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:26 vm00.local ceph-mon[49980]: pgmap v1088: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:28 vm00.local ceph-mon[49980]: pgmap v1089: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:28 vm03.local ceph-mon[50983]: pgmap v1089: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:30 vm00.local ceph-mon[49980]: pgmap v1090: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:30 vm03.local ceph-mon[50983]: pgmap v1090: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:31.433 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:51:31.433 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:51:31.467 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:51:31.467 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:51:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:32 vm00.local ceph-mon[49980]: pgmap v1091: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:32 vm03.local ceph-mon[50983]: pgmap v1091: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:51:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:34 vm00.local ceph-mon[49980]: pgmap v1092: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:34 vm03.local ceph-mon[50983]: pgmap v1092: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:51:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:51:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:51:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:51:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:51:36.469 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:51:36.469 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:51:36.495 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:51:36.496 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:51:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:36 vm00.local ceph-mon[49980]: pgmap v1093: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:36 vm03.local ceph-mon[50983]: pgmap v1093: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:38 vm00.local ceph-mon[49980]: pgmap v1094: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:38 vm03.local ceph-mon[50983]: pgmap v1094: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:40 vm00.local ceph-mon[49980]: pgmap v1095: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:40 vm03.local ceph-mon[50983]: pgmap v1095: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:41.497 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:51:41.498 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:51:41.524 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:51:41.524 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:51:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:42 vm00.local ceph-mon[49980]: pgmap v1096: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:42 vm03.local ceph-mon[50983]: pgmap v1096: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:44 
vm00.local ceph-mon[49980]: pgmap v1097: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:44 vm03.local ceph-mon[50983]: pgmap v1097: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:46.526 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:51:46.526 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:51:46.552 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:51:46.553 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:51:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:46 vm00.local ceph-mon[49980]: pgmap v1098: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:46 vm03.local ceph-mon[50983]: pgmap v1098: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:48 vm00.local ceph-mon[49980]: pgmap v1099: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:48 vm03.local ceph-mon[50983]: pgmap v1099: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:50.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:50 vm00.local ceph-mon[49980]: pgmap v1100: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:50.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:51:50.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:51:50.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:51:50.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:51:50.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:50 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:51:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:50 vm03.local ceph-mon[50983]: pgmap v1100: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:51:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:50 
vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:51:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:51:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:51:50.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:50 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:51:51.554 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:51:51.555 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:51:51.584 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:51:51.584 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:51:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:52 vm00.local ceph-mon[49980]: pgmap v1101: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:52 vm03.local ceph-mon[50983]: pgmap v1101: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:54 vm00.local ceph-mon[49980]: pgmap v1102: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:54 vm03.local ceph-mon[50983]: pgmap v1102: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:56 vm03.local ceph-mon[50983]: pgmap v1103: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:56.585 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:51:56.586 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:51:56.611 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:51:56.611 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:51:56.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:56 vm00.local ceph-mon[49980]: pgmap v1103: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:51:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:51:58 vm00.local ceph-mon[49980]: pgmap v1104: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:51:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:51:58 vm03.local ceph-mon[50983]: pgmap v1104: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:00 vm00.local ceph-mon[49980]: pgmap v1105: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:00 vm03.local ceph-mon[50983]: pgmap v1105: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:01.612 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:01.613 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:01.638 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:01.639 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:02 vm00.local ceph-mon[49980]: pgmap v1106: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:02 vm03.local ceph-mon[50983]: pgmap v1106: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:04 vm00.local ceph-mon[49980]: pgmap v1107: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:04 vm03.local ceph-mon[50983]: pgmap v1107: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:06.641 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:06.641 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:06.779 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:06.779 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:06 vm00.local ceph-mon[49980]: pgmap v1108: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:06 vm03.local ceph-mon[50983]: pgmap v1108: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:08 vm00.local ceph-mon[49980]: pgmap v1109: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:08 vm03.local ceph-mon[50983]: pgmap v1109: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:10 vm03.local ceph-mon[50983]: pgmap v1110: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:10 vm00.local ceph-mon[49980]: pgmap v1110: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:11.781 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:11.782 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:11.809 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:11.809 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:12 vm03.local ceph-mon[50983]: pgmap v1111: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:13.082 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:12 vm00.local ceph-mon[49980]: pgmap v1111: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:14 vm00.local ceph-mon[49980]: pgmap v1112: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:14 vm03.local ceph-mon[50983]: pgmap v1112: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:16.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:16 vm00.local ceph-mon[49980]: pgmap v1113: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:16 vm03.local ceph-mon[50983]: pgmap v1113: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:16.811 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:16.811 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:16.837 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:16.837 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:18 vm00.local ceph-mon[49980]: pgmap v1114: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:18 vm03.local ceph-mon[50983]: pgmap v1114: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:20 vm00.local ceph-mon[49980]: pgmap v1115: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:20 vm03.local ceph-mon[50983]: pgmap v1115: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:21.839 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:21.839 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:21.867 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:21.867 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:22 vm00.local ceph-mon[49980]: pgmap v1116: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:22 vm03.local ceph-mon[50983]: pgmap 
v1116: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:24 vm00.local ceph-mon[49980]: pgmap v1117: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:24 vm03.local ceph-mon[50983]: pgmap v1117: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:26.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:26 vm00.local ceph-mon[49980]: pgmap v1118: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:26.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:26 vm03.local ceph-mon[50983]: pgmap v1118: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:26.869 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:26.869 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:26.896 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:26.896 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:28 vm00.local ceph-mon[49980]: pgmap v1119: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:28 vm03.local ceph-mon[50983]: pgmap v1119: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:30 vm00.local ceph-mon[49980]: pgmap v1120: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:30 vm03.local ceph-mon[50983]: pgmap v1120: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:31.898 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:31.898 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:31.924 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:31.924 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:32 vm00.local ceph-mon[49980]: pgmap v1121: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:32 vm03.local ceph-mon[50983]: pgmap v1121: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:34 vm00.local ceph-mon[49980]: pgmap v1122: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:34 vm03.local ceph-mon[50983]: pgmap v1122: 97 pgs: 97 active+clean; 453 KiB 
data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:52:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:52:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:52:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:52:36.926 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:36.926 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:37.014 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:37.014 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:36 vm00.local ceph-mon[49980]: pgmap v1123: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:36 vm03.local ceph-mon[50983]: pgmap v1123: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:38 vm00.local ceph-mon[49980]: pgmap v1124: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:38 vm03.local ceph-mon[50983]: pgmap v1124: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:40 vm00.local ceph-mon[49980]: pgmap v1125: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:40 vm03.local ceph-mon[50983]: pgmap v1125: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:42.016 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:42.016 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:42.043 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:42.044 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:42 vm00.local ceph-mon[49980]: pgmap v1126: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB 
/ 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:42 vm03.local ceph-mon[50983]: pgmap v1126: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:44 vm00.local ceph-mon[49980]: pgmap v1127: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:44 vm03.local ceph-mon[50983]: pgmap v1127: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:46 vm00.local ceph-mon[49980]: pgmap v1128: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:46 vm03.local ceph-mon[50983]: pgmap v1128: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:47.045 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:47.054 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:47.251 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:47.251 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:49.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:49 vm03.local ceph-mon[50983]: pgmap v1129: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:49.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:49 vm00.local ceph-mon[49980]: pgmap v1129: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:51 vm00.local ceph-mon[49980]: pgmap v1130: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:51 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:52:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:51 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:52:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:51 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:52:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:51 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:52:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:51 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:52:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:51 vm03.local ceph-mon[50983]: pgmap v1130: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:51 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:52:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:51 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:52:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:51 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:52:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:51 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:52:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:51 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:52:52.253 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:52.253 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:52.281 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:52.282 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:53 vm00.local ceph-mon[49980]: pgmap v1131: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:53 vm03.local ceph-mon[50983]: pgmap v1131: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:55.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:55 vm00.local ceph-mon[49980]: pgmap v1132: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:55 vm03.local ceph-mon[50983]: pgmap v1132: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:56.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:56 vm00.local ceph-mon[49980]: pgmap v1133: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:56 vm03.local ceph-mon[50983]: pgmap v1133: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:52:57.283 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:52:57.284 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:52:57.309 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:52:57.310 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:52:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:52:58 vm00.local ceph-mon[49980]: pgmap v1134: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:52:59.056 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:52:58 vm03.local ceph-mon[50983]: pgmap v1134: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:00 vm00.local ceph-mon[49980]: pgmap v1135: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:00 vm03.local ceph-mon[50983]: pgmap v1135: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:02.311 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:02.312 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:02.338 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:02.338 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:02 vm00.local ceph-mon[49980]: pgmap v1136: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:02 vm03.local ceph-mon[50983]: pgmap v1136: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:04 vm00.local ceph-mon[49980]: pgmap v1137: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:04 vm03.local ceph-mon[50983]: pgmap v1137: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:06 vm00.local ceph-mon[49980]: pgmap v1138: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:06 vm03.local ceph-mon[50983]: pgmap v1138: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:07.339 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:07.340 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:07.365 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:07.366 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:08 vm00.local ceph-mon[49980]: pgmap v1139: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:08 vm03.local ceph-mon[50983]: pgmap v1139: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:10 vm00.local ceph-mon[49980]: pgmap v1140: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:11.056 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:10 vm03.local ceph-mon[50983]: pgmap v1140: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:12.367 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:12.367 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:12.392 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:12.392 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:12 vm00.local ceph-mon[49980]: pgmap v1141: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:12 vm03.local ceph-mon[50983]: pgmap v1141: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:14 vm00.local ceph-mon[49980]: pgmap v1142: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:14 vm03.local ceph-mon[50983]: pgmap v1142: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:16.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:16 vm00.local ceph-mon[49980]: pgmap v1143: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:16 vm03.local ceph-mon[50983]: pgmap v1143: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:17.393 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:17.394 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:17.421 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:17.421 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:18 vm00.local ceph-mon[49980]: pgmap v1144: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:18 vm03.local ceph-mon[50983]: pgmap v1144: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:20 vm00.local ceph-mon[49980]: pgmap v1145: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:20 vm03.local ceph-mon[50983]: pgmap v1145: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:22.422 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:22.423 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:22.451 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-10T05:53:22.451 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:22 vm00.local ceph-mon[49980]: pgmap v1146: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:22 vm03.local ceph-mon[50983]: pgmap v1146: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:24 vm00.local ceph-mon[49980]: pgmap v1147: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:24 vm03.local ceph-mon[50983]: pgmap v1147: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:53:26.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:26 vm00.local ceph-mon[49980]: pgmap v1148: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:26.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:26 vm03.local ceph-mon[50983]: pgmap v1148: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:27.452 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:27.453 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:27.480 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:27.480 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:28 vm00.local ceph-mon[49980]: pgmap v1149: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:28 vm03.local ceph-mon[50983]: pgmap v1149: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:30 vm00.local ceph-mon[49980]: pgmap v1150: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:53:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:30 vm03.local ceph-mon[50983]: pgmap v1150: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:53:32.482 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:32.482 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:32.507 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:32.508 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:32 vm00.local ceph-mon[49980]: pgmap v1151: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:53:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:32 vm03.local ceph-mon[50983]: pgmap v1151: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:53:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:34 vm00.local ceph-mon[49980]: pgmap v1152: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:34 vm03.local ceph-mon[50983]: pgmap v1152: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:53:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:53:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:53:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:53:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:36 vm00.local ceph-mon[49980]: pgmap v1153: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:53:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:36 vm03.local ceph-mon[50983]: pgmap v1153: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T05:53:37.509 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:37.510 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:53:37.535 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:53:37.536 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:53:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:38 vm00.local ceph-mon[49980]: pgmap v1154: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:38 vm03.local ceph-mon[50983]: pgmap v1154: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:53:40 vm00.local ceph-mon[49980]: pgmap v1155: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:53:40 vm03.local ceph-mon[50983]: pgmap v1155: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:53:42.537 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:53:42.538 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:53:42.564 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:53:42.564 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:53:47.566 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:53:47.567 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:53:47.592 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:53:47.593 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[... the same hostname/mount/fail/sleep cycle repeats every ~5 s, with attempts at 05:53:52, 05:53:57, 05:54:02, ..., 05:55:02 and 05:55:08 all failing with the identical "mount.nfs: mount system call failed" ...]
[... interleaved journalctl@ceph.mon.vm00 and journalctl@ceph.mon.vm03 heartbeats report pgmap v1156 through v1198, every one "97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail", with only the read/write rates (170-341 B/s) varying; at 05:53:51 and 05:54:51 the active mgr (mgr.vm00.vnepyw) dispatches "config dump", "config generate-minimal-conf", and "auth get client.admin" to both mons, and at 05:54:35 it dispatches "config rm" for mgr/rbd_support/vm00.vnepyw/trash_purge_schedule and .../mirror_snapshot_schedule ...]
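The "+" and "++" prefixes on the vm00.stderr lines are bash xtrace output. Reconstructed from that trace alone (the exact quoting in the original task script is an assumption; only the traced commands themselves appear in the log), the loop being executed has this shape:

    # keep retrying until the NFS export at /fake becomes mountable;
    # each failed mount(2) call logs "mount.nfs: mount system call failed"
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync ; do
        sleep 5
    done

Until the export is actually served again, mount(2) keeps returning an error and the loop never exits, which is exactly the pattern that continues below.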
[... the retry loop continues unchanged from 05:55:13 through 05:56:48, one failed attempt roughly every 5 s; pgmap v1199 through v1251 stay "97 pgs: 97 active+clean"; the mgr repeats its periodic dispatches: "config rm" for the mgr/rbd_support schedules at 05:55:35 and 05:56:35, and "config dump"/"config generate-minimal-conf"/"auth get client.admin" at 05:55:51 and 05:56:51 ...]
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:56:53.611 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:56:53.611 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:56:53.637 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:56:53.638 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:56:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:56:54 vm00.local ceph-mon[49980]: pgmap v1252: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:56:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:56:54 vm03.local ceph-mon[50983]: pgmap v1252: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:56:56.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:56:56 vm00.local ceph-mon[49980]: pgmap v1253: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:56:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:56:56 vm03.local ceph-mon[50983]: pgmap v1253: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:56:58.639 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:56:58.640 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:56:58.666 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:56:58.666 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:56:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:56:58 vm00.local ceph-mon[49980]: pgmap v1254: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:56:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:56:58 vm03.local ceph-mon[50983]: pgmap v1254: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:00 vm00.local ceph-mon[49980]: pgmap v1255: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:00 vm03.local ceph-mon[50983]: pgmap v1255: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:02 vm00.local ceph-mon[49980]: pgmap v1256: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:02 vm03.local ceph-mon[50983]: pgmap v1256: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:03.667 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:03.668 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:03.695 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:03.696 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:05.030 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:04 vm00.local ceph-mon[49980]: pgmap v1257: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:04 vm03.local ceph-mon[50983]: pgmap v1257: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:06 vm00.local ceph-mon[49980]: pgmap v1258: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:06 vm03.local ceph-mon[50983]: pgmap v1258: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:08.697 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:08.697 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:08.724 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:08.724 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:08 vm00.local ceph-mon[49980]: pgmap v1259: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:08 vm03.local ceph-mon[50983]: pgmap v1259: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:10 vm00.local ceph-mon[49980]: pgmap v1260: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:10 vm03.local ceph-mon[50983]: pgmap v1260: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:12 vm00.local ceph-mon[49980]: pgmap v1261: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:12 vm03.local ceph-mon[50983]: pgmap v1261: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:13.726 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:13.726 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:13.751 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:13.751 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:14 vm00.local ceph-mon[49980]: pgmap v1262: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:14 vm03.local ceph-mon[50983]: pgmap v1262: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:16.780 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:16 vm00.local ceph-mon[49980]: pgmap v1263: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:16 vm03.local ceph-mon[50983]: pgmap v1263: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:18.753 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:18.753 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:18.778 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:18.779 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:18 vm00.local ceph-mon[49980]: pgmap v1264: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:18 vm03.local ceph-mon[50983]: pgmap v1264: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:20 vm00.local ceph-mon[49980]: pgmap v1265: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:20 vm03.local ceph-mon[50983]: pgmap v1265: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:22 vm00.local ceph-mon[49980]: pgmap v1266: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:22 vm03.local ceph-mon[50983]: pgmap v1266: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:23.780 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:23.781 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:23.805 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:23.806 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:24 vm00.local ceph-mon[49980]: pgmap v1267: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:24 vm03.local ceph-mon[50983]: pgmap v1267: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:26.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:26 vm00.local ceph-mon[49980]: pgmap v1268: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:26.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:26 vm03.local ceph-mon[50983]: pgmap v1268: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:28.807 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:28.808 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:28.833 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:28.833 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:28 vm00.local ceph-mon[49980]: pgmap v1269: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:28 vm03.local ceph-mon[50983]: pgmap v1269: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:30 vm00.local ceph-mon[49980]: pgmap v1270: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:30 vm03.local ceph-mon[50983]: pgmap v1270: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:32 vm00.local ceph-mon[49980]: pgmap v1271: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:32 vm03.local ceph-mon[50983]: pgmap v1271: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:33.835 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:33.835 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:33.860 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:33.860 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:34 vm00.local ceph-mon[49980]: pgmap v1272: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:34 vm03.local ceph-mon[50983]: pgmap v1272: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:57:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:57:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:57:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:35 vm03.local ceph-mon[50983]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:57:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:36 vm00.local ceph-mon[49980]: pgmap v1273: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:36 vm03.local ceph-mon[50983]: pgmap v1273: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:38.862 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:38.862 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:38.888 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:38.888 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:38 vm00.local ceph-mon[49980]: pgmap v1274: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:38 vm03.local ceph-mon[50983]: pgmap v1274: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:40 vm00.local ceph-mon[49980]: pgmap v1275: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:40 vm03.local ceph-mon[50983]: pgmap v1275: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:42 vm00.local ceph-mon[49980]: pgmap v1276: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:42 vm03.local ceph-mon[50983]: pgmap v1276: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:43.890 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:43.890 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:43.982 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:43.983 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:44 vm00.local ceph-mon[49980]: pgmap v1277: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:44 vm03.local ceph-mon[50983]: pgmap v1277: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:46.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:46 vm00.local ceph-mon[49980]: pgmap v1278: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:46 
vm03.local ceph-mon[50983]: pgmap v1278: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:48.984 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:48.985 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:49.012 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:49.012 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:48 vm00.local ceph-mon[49980]: pgmap v1279: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:48 vm03.local ceph-mon[50983]: pgmap v1279: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:50 vm00.local ceph-mon[49980]: pgmap v1280: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:50 vm03.local ceph-mon[50983]: pgmap v1280: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:52.772 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: pgmap v1281: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:57:52.773 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:57:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: pgmap v1281: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local 
ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:57:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:57:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:57:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:57:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:57:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:57:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:57:54.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:54.014 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:54.039 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:54.039 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:57:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:54 vm00.local ceph-mon[49980]: pgmap v1282: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:54 vm03.local ceph-mon[50983]: pgmap v1282: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:56 vm03.local ceph-mon[50983]: pgmap v1283: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:56 vm00.local ceph-mon[49980]: pgmap v1283: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:57:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:57:58 vm00.local ceph-mon[49980]: pgmap v1284: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:59.041 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:57:59.041 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:57:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:57:58 vm03.local ceph-mon[50983]: pgmap v1284: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:57:59.066 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:57:59.067 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:00 vm00.local ceph-mon[49980]: pgmap v1285: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:00 vm03.local ceph-mon[50983]: pgmap v1285: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:02 vm00.local ceph-mon[49980]: pgmap v1286: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:02 vm03.local ceph-mon[50983]: pgmap v1286: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:04.068 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:04.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:04.094 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:04.094 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:04 vm00.local ceph-mon[49980]: pgmap v1287: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:04 vm03.local ceph-mon[50983]: pgmap v1287: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:06 vm03.local ceph-mon[50983]: pgmap v1288: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:06 vm00.local ceph-mon[49980]: pgmap v1288: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:08 vm00.local ceph-mon[49980]: pgmap v1289: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:08 vm03.local ceph-mon[50983]: pgmap v1289: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:09.096 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:09.096 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:09.122 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:09.123 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:10 vm00.local ceph-mon[49980]: pgmap v1290: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:10 vm03.local ceph-mon[50983]: pgmap v1290: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s 
wr, 0 op/s 2026-03-10T05:58:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:12 vm00.local ceph-mon[49980]: pgmap v1291: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:12 vm03.local ceph-mon[50983]: pgmap v1291: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:14.124 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:14.124 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:14.155 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:14.155 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:14 vm00.local ceph-mon[49980]: pgmap v1292: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:14 vm03.local ceph-mon[50983]: pgmap v1292: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:16.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:16 vm03.local ceph-mon[50983]: pgmap v1293: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:17.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:16 vm00.local ceph-mon[49980]: pgmap v1293: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:19.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:18 vm00.local ceph-mon[49980]: pgmap v1294: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:18 vm03.local ceph-mon[50983]: pgmap v1294: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:19.156 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:19.157 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:19.203 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:19.203 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:20 vm00.local ceph-mon[49980]: pgmap v1295: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:20 vm03.local ceph-mon[50983]: pgmap v1295: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:22 vm00.local ceph-mon[49980]: pgmap v1296: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:22 vm03.local ceph-mon[50983]: pgmap v1296: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:24.204 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:24.205 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:24.231 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:24.232 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:24 vm00.local ceph-mon[49980]: pgmap v1297: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:24 vm03.local ceph-mon[50983]: pgmap v1297: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:26 vm00.local ceph-mon[49980]: pgmap v1298: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:26 vm03.local ceph-mon[50983]: pgmap v1298: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:29.234 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:29.234 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:29.261 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:29.262 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:28 vm00.local ceph-mon[49980]: pgmap v1299: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:28 vm03.local ceph-mon[50983]: pgmap v1299: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:30 vm00.local ceph-mon[49980]: pgmap v1300: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:30 vm03.local ceph-mon[50983]: pgmap v1300: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:32 vm00.local ceph-mon[49980]: pgmap v1301: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:32 vm03.local ceph-mon[50983]: pgmap v1301: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:34.263 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:34.264 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:34.290 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:34.290 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:34 vm00.local ceph-mon[49980]: pgmap v1302: 97 pgs: 97 active+clean; 453 KiB 
data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:34 vm03.local ceph-mon[50983]: pgmap v1302: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:58:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:58:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T05:58:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T05:58:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:36 vm00.local ceph-mon[49980]: pgmap v1303: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:36 vm03.local ceph-mon[50983]: pgmap v1303: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:38 vm00.local ceph-mon[49980]: pgmap v1304: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:39.292 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:39.292 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:38 vm03.local ceph-mon[50983]: pgmap v1304: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:39.318 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:39.318 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:40 vm00.local ceph-mon[49980]: pgmap v1305: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:40 vm03.local ceph-mon[50983]: pgmap v1305: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:42 vm00.local ceph-mon[49980]: pgmap v1306: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:42 vm03.local 
ceph-mon[50983]: pgmap v1306: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:44.319 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:44.320 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:44.344 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:44.345 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:44 vm00.local ceph-mon[49980]: pgmap v1307: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:44 vm03.local ceph-mon[50983]: pgmap v1307: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:46.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:46 vm03.local ceph-mon[50983]: pgmap v1308: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:47.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:46 vm00.local ceph-mon[49980]: pgmap v1308: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:48 vm00.local ceph-mon[49980]: pgmap v1309: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:48 vm03.local ceph-mon[50983]: pgmap v1309: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:49.346 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:49.347 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:49.373 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:49.373 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:50 vm00.local ceph-mon[49980]: pgmap v1310: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:50 vm03.local ceph-mon[50983]: pgmap v1310: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:52.972 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:52 vm03.local ceph-mon[50983]: pgmap v1311: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:52.972 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:58:52.972 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:52 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:58:52.972 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:52 vm03.local ceph-mon[50983]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:58:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:52 vm00.local ceph-mon[49980]: pgmap v1311: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T05:58:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T05:58:53.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:52 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T05:58:54.375 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:54.375 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:54.401 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:54.401 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:58:54.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:58:54.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:54 vm00.local ceph-mon[49980]: pgmap v1312: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:54.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T05:58:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:54 vm03.local ceph-mon[50983]: pgmap v1312: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:54.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T05:58:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:56 vm00.local ceph-mon[49980]: pgmap v1313: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:56 vm03.local ceph-mon[50983]: pgmap v1313: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:58:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:58:59 vm00.local ceph-mon[49980]: pgmap v1314: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:58:59 vm03.local ceph-mon[50983]: pgmap v1314: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:58:59.403 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:58:59.403 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:58:59.430 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:58:59.430 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:59:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:01 vm03.local ceph-mon[50983]: pgmap v1315: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:59:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:01 vm00.local ceph-mon[49980]: pgmap v1315: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:59:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:03 vm03.local ceph-mon[50983]: pgmap v1316: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:59:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:03 vm00.local ceph-mon[49980]: pgmap v1316: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T05:59:04.431 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T05:59:04.432 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T05:59:04.458 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T05:59:04.458 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T05:59:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:05 vm00.local ceph-mon[49980]: pgmap v1317: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:59:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:05 vm03.local ceph-mon[50983]: pgmap v1317: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T05:59:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:07 vm00.local ceph-mon[49980]: pgmap 
v1318: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:07 vm03.local ceph-mon[50983]: pgmap v1318: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:09.460 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:09.460 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:09.486 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:09.487 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:09 vm00.local ceph-mon[49980]: pgmap v1319: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:09 vm03.local ceph-mon[50983]: pgmap v1319: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:11 vm00.local ceph-mon[49980]: pgmap v1320: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:11 vm03.local ceph-mon[50983]: pgmap v1320: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:13.342 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:13 vm00.local ceph-mon[49980]: pgmap v1321: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:13 vm03.local ceph-mon[50983]: pgmap v1321: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:14.489 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:14.489 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:14.516 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:14.516 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:15 vm00.local ceph-mon[49980]: pgmap v1322: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:15 vm03.local ceph-mon[50983]: pgmap v1322: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:17.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:17 vm00.local ceph-mon[49980]: pgmap v1323: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:17 vm03.local ceph-mon[50983]: pgmap v1323: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:19.517 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:19.518 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:19 vm00.local ceph-mon[49980]: pgmap v1324: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:19.543 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:19.544 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:19.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:19 vm03.local ceph-mon[50983]: pgmap v1324: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:21 vm00.local ceph-mon[49980]: pgmap v1325: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:21 vm03.local ceph-mon[50983]: pgmap v1325: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:23.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:23 vm00.local ceph-mon[49980]: pgmap v1326: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:23.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:23 vm03.local ceph-mon[50983]: pgmap v1326: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:24.545 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:24.546 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:24.571 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:24.572 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:25 vm03.local ceph-mon[50983]: pgmap v1327: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:25 vm00.local ceph-mon[49980]: pgmap v1327: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:27 vm03.local ceph-mon[50983]: pgmap v1328: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:27 vm00.local ceph-mon[49980]: pgmap v1328: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:29.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:29 vm03.local ceph-mon[50983]: pgmap v1329: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:29.573 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:29.574 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:29.599 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:29.600 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:29 vm00.local ceph-mon[49980]: pgmap v1329: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:31 vm03.local ceph-mon[50983]: pgmap v1330: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:31 vm00.local ceph-mon[49980]: pgmap v1330: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:33 vm00.local ceph-mon[49980]: pgmap v1331: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:33 vm03.local ceph-mon[50983]: pgmap v1331: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:34.601 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:34.602 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:34.626 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:34.627 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:35 vm00.local ceph-mon[49980]: pgmap v1332: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:59:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:59:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:35 vm03.local ceph-mon[50983]: pgmap v1332: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T05:59:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T05:59:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:36 vm00.local ceph-mon[49980]: pgmap v1333: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:36 vm03.local ceph-mon[50983]: pgmap v1333: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:38 vm00.local ceph-mon[49980]: pgmap v1334: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:38 vm03.local ceph-mon[50983]: pgmap v1334: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:39.628 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:39.629 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:39.655 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:39.656 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:40 vm00.local ceph-mon[49980]: pgmap v1335: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:40 vm03.local ceph-mon[50983]: pgmap v1335: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:42 vm00.local ceph-mon[49980]: pgmap v1336: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:42 vm03.local ceph-mon[50983]: pgmap v1336: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:44.657 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:44.658 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:44.683 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:44.684 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:44 vm00.local ceph-mon[49980]: pgmap v1337: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:44 vm03.local ceph-mon[50983]: pgmap v1337: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:47.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:46 vm00.local ceph-mon[49980]: pgmap v1338: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:46 vm03.local ceph-mon[50983]: pgmap v1338: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:49.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:48 vm00.local ceph-mon[49980]: pgmap v1339: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:48 vm03.local ceph-mon[50983]: pgmap v1339: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:49.685 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:49.686 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:49.713 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:49.713 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:50 vm00.local ceph-mon[49980]: pgmap v1340: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:50 vm03.local ceph-mon[50983]: pgmap v1340: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:52 vm00.local ceph-mon[49980]: pgmap v1341: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:52 vm03.local ceph-mon[50983]: pgmap v1341: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:54.715 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:54.715 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:54.740 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:54.741 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T05:59:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:54 vm00.local ceph-mon[49980]: pgmap v1342: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:59:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:59:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:59:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:59:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:54 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:59:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:54 vm03.local ceph-mon[50983]: pgmap v1342: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T05:59:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T05:59:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T05:59:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:59:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:54 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T05:59:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:56 vm00.local ceph-mon[49980]: pgmap v1343: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:56 vm03.local ceph-mon[50983]: pgmap v1343: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T05:59:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 05:59:58 vm00.local ceph-mon[49980]: pgmap v1344: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 05:59:58 vm03.local ceph-mon[50983]: pgmap v1344: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T05:59:59.742 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T05:59:59.743 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T05:59:59.769 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T05:59:59.769 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:00 vm00.local ceph-mon[49980]: pgmap v1345: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:00 vm00.local ceph-mon[49980]: overall HEALTH_OK
2026-03-10T06:00:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:00 vm03.local ceph-mon[50983]: pgmap v1345: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:00 vm03.local ceph-mon[50983]: overall HEALTH_OK
2026-03-10T06:00:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:02 vm00.local ceph-mon[49980]: pgmap v1346: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:02 vm03.local ceph-mon[50983]: pgmap v1346: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:04.771 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:04.771 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:04.797 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:04.797 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:04 vm03.local ceph-mon[50983]: pgmap v1347: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:04 vm00.local ceph-mon[49980]: pgmap v1347: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:06 vm03.local ceph-mon[50983]: pgmap v1348: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:06 vm00.local ceph-mon[49980]: pgmap v1348: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:08 vm03.local ceph-mon[50983]: pgmap v1349: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:08 vm00.local ceph-mon[49980]: pgmap v1349: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:09.798 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:09.799 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:09.825 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:09.826 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:10 vm03.local ceph-mon[50983]: pgmap v1350: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:10 vm00.local ceph-mon[49980]: pgmap v1350: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:12 vm03.local ceph-mon[50983]: pgmap v1351: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:13.082 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:12 vm00.local ceph-mon[49980]: pgmap v1351: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:14.827 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:14.828 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:14.854 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:14.855 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:14 vm00.local ceph-mon[49980]: pgmap v1352: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:14 vm03.local ceph-mon[50983]: pgmap v1352: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:16 vm00.local ceph-mon[49980]: pgmap v1353: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:16 vm03.local ceph-mon[50983]: pgmap v1353: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:18 vm00.local ceph-mon[49980]: pgmap v1354: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:18 vm03.local ceph-mon[50983]: pgmap v1354: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:19.856 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:19.856 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:19.882 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:19.882 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:20 vm00.local ceph-mon[49980]: pgmap v1355: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:20 vm03.local ceph-mon[50983]: pgmap v1355: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:22 vm00.local ceph-mon[49980]: pgmap v1356: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:22 vm03.local ceph-mon[50983]: pgmap v1356: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:24.884 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:24.884 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:24.910 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:24.911 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:24 vm00.local ceph-mon[49980]: pgmap v1357: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:24 vm03.local ceph-mon[50983]: pgmap v1357: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:26 vm00.local ceph-mon[49980]: pgmap v1358: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:26 vm03.local ceph-mon[50983]: pgmap v1358: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:28 vm00.local ceph-mon[49980]: pgmap v1359: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:28 vm03.local ceph-mon[50983]: pgmap v1359: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:29.912 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:29.913 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:29.940 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:29.940 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:30 vm00.local ceph-mon[49980]: pgmap v1360: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:30 vm03.local ceph-mon[50983]: pgmap v1360: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:32 vm00.local ceph-mon[49980]: pgmap v1361: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:32 vm03.local ceph-mon[50983]: pgmap v1361: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:34.942 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:34.942 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:34.968 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:34.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:34 vm00.local ceph-mon[49980]: pgmap v1362: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:34 vm03.local ceph-mon[50983]: pgmap v1362: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:00:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:00:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:00:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:00:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:36 vm00.local ceph-mon[49980]: pgmap v1363: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:36 vm03.local ceph-mon[50983]: pgmap v1363: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:38 vm00.local ceph-mon[49980]: pgmap v1364: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:38 vm03.local ceph-mon[50983]: pgmap v1364: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:39.969 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:39.970 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:39.997 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:39.997 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:40 vm00.local ceph-mon[49980]: pgmap v1365: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:40 vm03.local ceph-mon[50983]: pgmap v1365: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:42 vm00.local ceph-mon[49980]: pgmap v1366: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:42 vm03.local ceph-mon[50983]: pgmap v1366: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:44.999 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:44.999 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:45.024 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:45.025 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:44 vm00.local ceph-mon[49980]: pgmap v1367: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:44 vm03.local ceph-mon[50983]: pgmap v1367: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:46 vm00.local ceph-mon[49980]: pgmap v1368: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:46 vm03.local ceph-mon[50983]: pgmap v1368: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:49.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:49 vm03.local ceph-mon[50983]: pgmap v1369: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:49.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:49 vm00.local ceph-mon[49980]: pgmap v1369: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:50.029 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:50.030 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:50.057 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:50.057 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:51 vm00.local ceph-mon[49980]: pgmap v1370: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:51 vm03.local ceph-mon[50983]: pgmap v1370: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:53 vm00.local ceph-mon[49980]: pgmap v1371: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:53 vm03.local ceph-mon[50983]: pgmap v1371: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:55.059 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:00:55.059 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:00:55.086 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:00:55.089 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:00:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:55 vm00.local ceph-mon[49980]: pgmap v1372: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:00:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:00:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:00:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:00:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:00:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:55 vm03.local ceph-mon[50983]: pgmap v1372: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:00:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:00:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:00:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:00:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:00:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:57 vm00.local ceph-mon[49980]: pgmap v1373: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:57 vm03.local ceph-mon[50983]: pgmap v1373: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:00:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:00:59 vm00.local ceph-mon[49980]: pgmap v1374: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:00:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:00:59 vm03.local ceph-mon[50983]: pgmap v1374: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:00.091 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:00.091 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:00.122 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:00.123 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:01 vm03.local ceph-mon[50983]: pgmap v1375: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:01 vm00.local ceph-mon[49980]: pgmap v1375: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:03 vm00.local ceph-mon[49980]: pgmap v1376: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:03 vm03.local ceph-mon[50983]: pgmap v1376: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:05.125 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:05.125 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:05.152 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:05.153 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:05 vm00.local ceph-mon[49980]: pgmap v1377: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:05 vm03.local ceph-mon[50983]: pgmap v1377: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:07 vm00.local ceph-mon[49980]: pgmap v1378: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:07 vm03.local ceph-mon[50983]: pgmap v1378: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:09 vm00.local ceph-mon[49980]: pgmap v1379: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:09 vm03.local ceph-mon[50983]: pgmap v1379: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:10.155 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:10.156 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:10.182 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:10.183 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:11 vm03.local ceph-mon[50983]: pgmap v1380: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:11 vm00.local ceph-mon[49980]: pgmap v1380: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:13.339 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:13 vm00.local ceph-mon[49980]: pgmap v1381: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:13 vm03.local ceph-mon[50983]: pgmap v1381: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:15.185 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:15.185 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:15.211 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:15.212 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:15 vm00.local ceph-mon[49980]: pgmap v1382: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:15 vm03.local ceph-mon[50983]: pgmap v1382: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:17 vm00.local ceph-mon[49980]: pgmap v1383: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:17 vm03.local ceph-mon[50983]: pgmap v1383: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:19 vm03.local ceph-mon[50983]: pgmap v1384: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:19 vm00.local ceph-mon[49980]: pgmap v1384: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:20.213 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:20.214 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:20.243 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:20.243 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:21 vm03.local ceph-mon[50983]: pgmap v1385: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:21 vm00.local ceph-mon[49980]: pgmap v1385: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:23 vm00.local ceph-mon[49980]: pgmap v1386: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:23 vm03.local ceph-mon[50983]: pgmap v1386: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:24.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:24 vm00.local ceph-mon[49980]: pgmap v1387: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:24 vm03.local ceph-mon[50983]: pgmap v1387: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:25.244 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:25.245 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:25.270 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:25.271 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:27.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:26 vm00.local ceph-mon[49980]: pgmap v1388: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:27.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:26 vm03.local ceph-mon[50983]: pgmap v1388: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:29.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:28 vm00.local ceph-mon[49980]: pgmap v1389: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:28 vm03.local ceph-mon[50983]: pgmap v1389: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:30.272 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:30.273 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:30.298 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:30.298 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:30 vm00.local ceph-mon[49980]: pgmap v1390: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:30 vm03.local ceph-mon[50983]: pgmap v1390: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:32 vm00.local ceph-mon[49980]: pgmap v1391: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:32 vm03.local ceph-mon[50983]: pgmap v1391: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:34 vm00.local ceph-mon[49980]: pgmap v1392: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:34 vm03.local ceph-mon[50983]: pgmap v1392: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:35.299 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:35.300 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:35.325 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:35.325 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:01:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:01:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:01:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:01:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:36 vm00.local ceph-mon[49980]: pgmap v1393: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:36 vm03.local ceph-mon[50983]: pgmap v1393: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:39.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:38 vm00.local ceph-mon[49980]: pgmap v1394: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:38 vm03.local ceph-mon[50983]: pgmap v1394: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:40.327 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:40.327 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:40.353 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:40.353 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:40 vm00.local ceph-mon[49980]: pgmap v1395: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:40 vm03.local ceph-mon[50983]: pgmap v1395: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:42 vm00.local ceph-mon[49980]: pgmap v1396: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:42 vm03.local ceph-mon[50983]: pgmap v1396: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:44 vm03.local ceph-mon[50983]: pgmap v1397: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:44 vm00.local ceph-mon[49980]: pgmap v1397: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:45.355 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:45.356 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:45.381 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:45.381 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:46 vm00.local ceph-mon[49980]: pgmap v1398: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:46 vm03.local ceph-mon[50983]: pgmap v1398: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:48 vm00.local ceph-mon[49980]: pgmap v1399: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:48 vm03.local ceph-mon[50983]: pgmap v1399: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:50.383 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:50.383 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:50.409 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:50.409 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:50 vm00.local ceph-mon[49980]: pgmap v1400: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:50 vm03.local ceph-mon[50983]: pgmap v1400: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:53 vm00.local ceph-mon[49980]: pgmap v1401: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:53 vm03.local ceph-mon[50983]: pgmap v1401: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:55 vm00.local ceph-mon[49980]: pgmap v1402: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:55 vm03.local ceph-mon[50983]: pgmap v1402: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:01:55.410 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:01:55.411 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:01:55.435 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:01:55.436 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:01:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:57 vm00.local ceph-mon[49980]: pgmap v1403: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:57 vm03.local ceph-mon[50983]: pgmap v1403: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:01:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:01:59 vm00.local ceph-mon[49980]: pgmap v1404: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:01:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:01:59 vm03.local ceph-mon[50983]: pgmap v1404: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:00.437 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:00.438 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:00.463 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:00.463 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:01 vm00.local ceph-mon[49980]: pgmap v1405: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:01 vm03.local ceph-mon[50983]: pgmap v1405: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:03 vm00.local ceph-mon[49980]: pgmap v1406: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:03 vm03.local ceph-mon[50983]: pgmap v1406: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:05.464 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:05.465 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:05.491 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:05.492 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:05 vm00.local ceph-mon[49980]: pgmap v1407: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:05 vm03.local ceph-mon[50983]: pgmap v1407: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:07 vm00.local ceph-mon[49980]: pgmap v1408: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:07 vm03.local ceph-mon[50983]: pgmap v1408: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:09 vm00.local ceph-mon[49980]: pgmap v1409: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:09 vm03.local ceph-mon[50983]: pgmap v1409: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:10.493 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:10.493 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:10.520 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:10.521 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:11 vm00.local ceph-mon[49980]: pgmap v1410: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:11 vm03.local ceph-mon[50983]: pgmap v1410: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:13.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:13 vm00.local ceph-mon[49980]: pgmap v1411: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:13 vm03.local ceph-mon[50983]: pgmap v1411: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:15.522 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:15.523 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:15 vm00.local ceph-mon[49980]: pgmap v1412: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:15 vm03.local ceph-mon[50983]: pgmap v1412: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:15.585 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:15.586 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:17 vm00.local ceph-mon[49980]: pgmap v1413: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:17 vm03.local ceph-mon[50983]: pgmap v1413: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:19.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:19 vm00.local ceph-mon[49980]: pgmap v1414: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:19 vm03.local ceph-mon[50983]: pgmap v1414: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:20.587 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:20.588 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:20.614 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:20.615 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:21 vm00.local ceph-mon[49980]: pgmap v1415: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:21 vm03.local ceph-mon[50983]: pgmap v1415: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:23 vm00.local ceph-mon[49980]: pgmap v1416: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:23 vm03.local ceph-mon[50983]: pgmap v1416: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:25 vm00.local ceph-mon[49980]: pgmap v1417: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:25 vm03.local ceph-mon[50983]: pgmap v1417: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:25.616 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:25.617 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:25.642 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:25.642 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:27 vm00.local ceph-mon[49980]: pgmap v1418: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:27 vm03.local ceph-mon[50983]: pgmap v1418: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:29.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:29 vm00.local ceph-mon[49980]: pgmap v1419: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:29.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:29 vm03.local ceph-mon[50983]: pgmap v1419: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:30.644 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:30.644 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:02:30.669 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:02:30.669 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:02:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:31 vm00.local ceph-mon[49980]: pgmap v1420: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:31 vm03.local ceph-mon[50983]: pgmap v1420: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:33 vm00.local ceph-mon[49980]: pgmap v1421: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:33 vm03.local ceph-mon[50983]: pgmap v1421: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:02:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:35 vm03.local ceph-mon[50983]: pgmap v1422: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:02:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:02:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:02:35.671 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:02:35.671 INFO:teuthology.orchestra.run.vm00.stderr:+
mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:02:35.697 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:02:35.697 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:02:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:35 vm00.local ceph-mon[49980]: pgmap v1422: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:02:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:02:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:37 vm03.local ceph-mon[50983]: pgmap v1423: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:37 vm00.local ceph-mon[49980]: pgmap v1423: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:39 vm03.local ceph-mon[50983]: pgmap v1424: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:39 vm00.local ceph-mon[49980]: pgmap v1424: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:40.699 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:02:40.699 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:02:40.726 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:02:40.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:02:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:41 vm03.local ceph-mon[50983]: pgmap v1425: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:41 vm00.local ceph-mon[49980]: pgmap v1425: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:43 vm00.local ceph-mon[49980]: pgmap v1426: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:43 vm03.local ceph-mon[50983]: pgmap v1426: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:45.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:02:45.728 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:02:45.755 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-10T06:02:45.756 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:02:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:45 vm00.local ceph-mon[49980]: pgmap v1427: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:45 vm03.local ceph-mon[50983]: pgmap v1427: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:47 vm00.local ceph-mon[49980]: pgmap v1428: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:47 vm03.local ceph-mon[50983]: pgmap v1428: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:49 vm00.local ceph-mon[49980]: pgmap v1429: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:49 vm03.local ceph-mon[50983]: pgmap v1429: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:50.757 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:02:50.757 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:02:50.785 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:02:50.786 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:02:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:51 vm00.local ceph-mon[49980]: pgmap v1430: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:51 vm03.local ceph-mon[50983]: pgmap v1430: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:53 vm00.local ceph-mon[49980]: pgmap v1431: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:53 vm03.local ceph-mon[50983]: pgmap v1431: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:55 vm00.local ceph-mon[49980]: pgmap v1432: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:02:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:02:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:55 
vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:02:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:02:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:02:55.787 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:02:55.788 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:02:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:55 vm03.local ceph-mon[50983]: pgmap v1432: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:02:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:02:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:02:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:02:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:02:55.813 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:02:55.814 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:02:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:56 vm00.local ceph-mon[49980]: pgmap v1433: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:56 vm03.local ceph-mon[50983]: pgmap v1433: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:02:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:02:58 vm00.local ceph-mon[49980]: pgmap v1434: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:02:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:02:58 vm03.local ceph-mon[50983]: pgmap v1434: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:00.815 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:00.816 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:00.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:00.842 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:01.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:00 vm00.local ceph-mon[49980]: pgmap v1435: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:00 vm03.local ceph-mon[50983]: pgmap v1435: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:02 vm00.local ceph-mon[49980]: pgmap v1436: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:02 vm03.local ceph-mon[50983]: pgmap v1436: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:04 vm00.local ceph-mon[49980]: pgmap v1437: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:04 vm03.local ceph-mon[50983]: pgmap v1437: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:05.843 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:05.844 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:05.871 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:05.872 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:06 vm00.local ceph-mon[49980]: pgmap v1438: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:06 vm03.local ceph-mon[50983]: pgmap v1438: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:08 vm00.local ceph-mon[49980]: pgmap v1439: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:08 vm03.local ceph-mon[50983]: pgmap v1439: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:10.873 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:10.874 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:10.902 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:10.903 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:10 vm00.local ceph-mon[49980]: pgmap v1440: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:10 vm03.local ceph-mon[50983]: pgmap v1440: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:13.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:12 vm00.local ceph-mon[49980]: pgmap v1441: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:12 vm03.local ceph-mon[50983]: pgmap v1441: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:14 vm00.local ceph-mon[49980]: pgmap v1442: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:14 vm03.local ceph-mon[50983]: pgmap v1442: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:15.904 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:15.905 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:16.066 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:16.067 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:17 vm03.local ceph-mon[50983]: pgmap v1443: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:17 vm00.local ceph-mon[49980]: pgmap v1443: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:19 vm00.local ceph-mon[49980]: pgmap v1444: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:19 vm03.local ceph-mon[50983]: pgmap v1444: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:21.068 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:21.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:21.097 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:21.098 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:21 vm00.local ceph-mon[49980]: pgmap v1445: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:21 vm03.local ceph-mon[50983]: pgmap v1445: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:23 vm00.local ceph-mon[49980]: pgmap v1446: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:23 vm03.local ceph-mon[50983]: pgmap v1446: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:25.030 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:24 vm00.local ceph-mon[49980]: pgmap v1447: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:24 vm03.local ceph-mon[50983]: pgmap v1447: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:26.099 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:26.099 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:26.239 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:26.240 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:26 vm00.local ceph-mon[49980]: pgmap v1448: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:26 vm03.local ceph-mon[50983]: pgmap v1448: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:28 vm00.local ceph-mon[49980]: pgmap v1449: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:28 vm03.local ceph-mon[50983]: pgmap v1449: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:31.241 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:31.242 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:31.268 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:31.269 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:31 vm00.local ceph-mon[49980]: pgmap v1450: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:31 vm03.local ceph-mon[50983]: pgmap v1450: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:33 vm00.local ceph-mon[49980]: pgmap v1451: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:33 vm03.local ceph-mon[50983]: pgmap v1451: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:35 vm03.local ceph-mon[50983]: pgmap v1452: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 
2026-03-10T06:03:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:03:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:35 vm00.local ceph-mon[49980]: pgmap v1452: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:03:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:03:36.270 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:36.271 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:36.297 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:36.297 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:37 vm03.local ceph-mon[50983]: pgmap v1453: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:37 vm00.local ceph-mon[49980]: pgmap v1453: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:39 vm03.local ceph-mon[50983]: pgmap v1454: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:39 vm00.local ceph-mon[49980]: pgmap v1454: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:41.299 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:41.299 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:41.332 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:41.333 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:41 vm00.local ceph-mon[49980]: pgmap v1455: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:41 vm03.local ceph-mon[50983]: pgmap v1455: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:43 vm00.local ceph-mon[49980]: pgmap v1456: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:43 
vm03.local ceph-mon[50983]: pgmap v1456: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:45 vm00.local ceph-mon[49980]: pgmap v1457: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:45 vm03.local ceph-mon[50983]: pgmap v1457: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:46.334 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:46.334 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:46.361 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:46.362 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:46 vm00.local ceph-mon[49980]: pgmap v1458: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:46 vm03.local ceph-mon[50983]: pgmap v1458: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:48 vm00.local ceph-mon[49980]: pgmap v1459: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:48 vm03.local ceph-mon[50983]: pgmap v1459: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:50 vm00.local ceph-mon[49980]: pgmap v1460: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:50 vm03.local ceph-mon[50983]: pgmap v1460: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:51.363 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:51.364 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:51.394 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:51.394 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:52 vm00.local ceph-mon[49980]: pgmap v1461: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:52 vm03.local ceph-mon[50983]: pgmap v1461: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:55.212 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:54 vm00.local ceph-mon[49980]: pgmap v1462: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:55.242 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:54 vm03.local ceph-mon[50983]: pgmap v1462: 
97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:03:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:03:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:03:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:03:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:55 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:03:56.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:03:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:03:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:03:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:03:56.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:55 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:03:56.396 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:03:56.396 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:03:56.422 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:03:56.423 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:03:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:57 vm00.local ceph-mon[49980]: pgmap v1463: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:57 vm03.local ceph-mon[50983]: pgmap v1463: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:03:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:03:59 vm00.local ceph-mon[49980]: pgmap v1464: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:03:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:03:59 vm03.local ceph-mon[50983]: pgmap v1464: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 
B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:01.424 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:01.424 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:01.451 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:01.452 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:01 vm00.local ceph-mon[49980]: pgmap v1465: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:01 vm03.local ceph-mon[50983]: pgmap v1465: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:03 vm00.local ceph-mon[49980]: pgmap v1466: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:03 vm03.local ceph-mon[50983]: pgmap v1466: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:05 vm00.local ceph-mon[49980]: pgmap v1467: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:05 vm03.local ceph-mon[50983]: pgmap v1467: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:06.453 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:06.454 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:06.481 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:06.482 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:07 vm00.local ceph-mon[49980]: pgmap v1468: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:07 vm03.local ceph-mon[50983]: pgmap v1468: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:09.608 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:09 vm03.local ceph-mon[50983]: pgmap v1469: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:09 vm00.local ceph-mon[49980]: pgmap v1469: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:11.483 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:11.484 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:11.510 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:11.510 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:11 vm00.local 
ceph-mon[49980]: pgmap v1470: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:11 vm03.local ceph-mon[50983]: pgmap v1470: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:12.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:12 vm00.local ceph-mon[49980]: pgmap v1471: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:12.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:12 vm03.local ceph-mon[50983]: pgmap v1471: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:14 vm00.local ceph-mon[49980]: pgmap v1472: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:14 vm03.local ceph-mon[50983]: pgmap v1472: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:16.512 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:16.512 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:16.588 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:16.589 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:16 vm00.local ceph-mon[49980]: pgmap v1473: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:16 vm03.local ceph-mon[50983]: pgmap v1473: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:18 vm00.local ceph-mon[49980]: pgmap v1474: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:18 vm03.local ceph-mon[50983]: pgmap v1474: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:20 vm00.local ceph-mon[49980]: pgmap v1475: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:20 vm03.local ceph-mon[50983]: pgmap v1475: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:21.591 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:21.591 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:21.617 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:21.617 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:22 vm00.local ceph-mon[49980]: pgmap v1476: 97 pgs: 97 
active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:22 vm03.local ceph-mon[50983]: pgmap v1476: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:24 vm00.local ceph-mon[49980]: pgmap v1477: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:24 vm03.local ceph-mon[50983]: pgmap v1477: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:26.619 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:26.619 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:26.646 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:26.647 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:26 vm00.local ceph-mon[49980]: pgmap v1478: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:26 vm03.local ceph-mon[50983]: pgmap v1478: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:28 vm00.local ceph-mon[49980]: pgmap v1479: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:28 vm03.local ceph-mon[50983]: pgmap v1479: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:30 vm00.local ceph-mon[49980]: pgmap v1480: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:30 vm03.local ceph-mon[50983]: pgmap v1480: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:31.648 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:31.648 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:31.676 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:31.676 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:32 vm00.local ceph-mon[49980]: pgmap v1481: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:32 vm03.local ceph-mon[50983]: pgmap v1481: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:34 vm00.local ceph-mon[49980]: pgmap v1482: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 
160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:34 vm03.local ceph-mon[50983]: pgmap v1482: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:04:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:04:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:04:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:04:36.677 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:36.678 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:36.706 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:36.706 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:36 vm00.local ceph-mon[49980]: pgmap v1483: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:36 vm03.local ceph-mon[50983]: pgmap v1483: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:38 vm00.local ceph-mon[49980]: pgmap v1484: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:38 vm03.local ceph-mon[50983]: pgmap v1484: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:40 vm00.local ceph-mon[49980]: pgmap v1485: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:40 vm03.local ceph-mon[50983]: pgmap v1485: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:41.708 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:41.708 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:41.736 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:41.737 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:42 vm00.local ceph-mon[49980]: pgmap v1486: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:42 vm03.local ceph-mon[50983]: pgmap v1486: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:44 vm00.local ceph-mon[49980]: pgmap v1487: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:44 vm03.local ceph-mon[50983]: pgmap v1487: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:46.738 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:46.739 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:46.764 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:46.765 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:46 vm00.local ceph-mon[49980]: pgmap v1488: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:46 vm03.local ceph-mon[50983]: pgmap v1488: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:48 vm00.local ceph-mon[49980]: pgmap v1489: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:48 vm03.local ceph-mon[50983]: pgmap v1489: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:04:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:51 vm00.local ceph-mon[49980]: pgmap v1490: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:51 vm03.local ceph-mon[50983]: pgmap v1490: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:51.766 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:04:51.767 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:04:51.797 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:04:51.797 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:04:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:53 vm00.local ceph-mon[49980]: pgmap v1491: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:04:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:53 vm03.local ceph-mon[50983]: pgmap v1491: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s 
wr, 0 op/s
2026-03-10T06:04:55.279 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:55 vm00.local ceph-mon[49980]: pgmap v1492: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:04:55.279 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:55 vm03.local ceph-mon[50983]: pgmap v1492: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:04:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:04:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:04:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:04:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:04:56.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:04:56.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:04:56.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:04:56.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:04:56.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:04:56.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:04:56.799 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:04:56.799 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:04:56.825 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:04:56.826 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:04:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:57 vm00.local ceph-mon[49980]: pgmap v1493: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:04:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:57 vm03.local ceph-mon[50983]: pgmap v1493: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:04:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:04:59 vm00.local ceph-mon[49980]: pgmap v1494: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:04:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:04:59 vm03.local ceph-mon[50983]: pgmap v1494: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:01 vm03.local ceph-mon[50983]: pgmap v1495: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:01 vm00.local ceph-mon[49980]: pgmap v1495: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:01.827 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:01.828 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:01.853 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:01.854 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:03 vm03.local ceph-mon[50983]: pgmap v1496: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:03 vm00.local ceph-mon[49980]: pgmap v1496: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:05 vm03.local ceph-mon[50983]: pgmap v1497: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:05 vm00.local ceph-mon[49980]: pgmap v1497: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:06.856 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:06.856 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:06.885 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:06.885 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:07 vm03.local ceph-mon[50983]: pgmap v1498: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:07 vm00.local ceph-mon[49980]: pgmap v1498: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:09 vm00.local ceph-mon[49980]: pgmap v1499: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:09 vm03.local ceph-mon[50983]: pgmap v1499: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:11 vm00.local ceph-mon[49980]: pgmap v1500: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
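[editor's note] The repeating "++ hostname" / "+ mount -t nfs ..." / "+ sleep 5" stderr records above are bash xtrace output from the client on vm00: the job retries the NFS mount of the /fake export every five seconds, and each attempt fails with "mount.nfs: mount system call failed". A minimal sketch of the loop consistent with that trace (the loop form is inferred from the xtrace, not quoted from the job definition; the paths and options come from the trace itself):

    # Retry the NFS mount until the export answers again; each failed
    # attempt prints "mount.nfs: mount system call failed" and backs
    # off for 5 seconds, matching the xtrace records in this log.
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        sleep 5
    done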
2026-03-10T06:05:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:11 vm03.local ceph-mon[50983]: pgmap v1500: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:11.887 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:11.887 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:11.912 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:11.913 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:13.333 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:13 vm00.local ceph-mon[49980]: pgmap v1501: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:13 vm03.local ceph-mon[50983]: pgmap v1501: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:15 vm00.local ceph-mon[49980]: pgmap v1502: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:15 vm03.local ceph-mon[50983]: pgmap v1502: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:16.914 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:16.914 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:16.940 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:16.941 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:17 vm00.local ceph-mon[49980]: pgmap v1503: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:17 vm03.local ceph-mon[50983]: pgmap v1503: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:19 vm00.local ceph-mon[49980]: pgmap v1504: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:19 vm03.local ceph-mon[50983]: pgmap v1504: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:21 vm00.local ceph-mon[49980]: pgmap v1505: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:21 vm03.local ceph-mon[50983]: pgmap v1505: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:21.942 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:21.942 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:21.967 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:21.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:23 vm00.local ceph-mon[49980]: pgmap v1506: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:23 vm03.local ceph-mon[50983]: pgmap v1506: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:25.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:25 vm00.local ceph-mon[49980]: pgmap v1507: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:25 vm03.local ceph-mon[50983]: pgmap v1507: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:26.969 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:26.970 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:26.997 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:26.997 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:27 vm00.local ceph-mon[49980]: pgmap v1508: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:27 vm03.local ceph-mon[50983]: pgmap v1508: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:29 vm00.local ceph-mon[49980]: pgmap v1509: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:29 vm03.local ceph-mon[50983]: pgmap v1509: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:31 vm00.local ceph-mon[49980]: pgmap v1510: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:31 vm03.local ceph-mon[50983]: pgmap v1510: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:31.998 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:31.999 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:32.026 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:32.027 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:33 vm00.local ceph-mon[49980]: pgmap v1511: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:33 vm03.local ceph-mon[50983]: pgmap v1511: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:35 vm00.local ceph-mon[49980]: pgmap v1512: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:05:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:05:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:35 vm03.local ceph-mon[50983]: pgmap v1512: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:05:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:05:37.028 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:37.028 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:37.054 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:37.054 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:37 vm00.local ceph-mon[49980]: pgmap v1513: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:37 vm03.local ceph-mon[50983]: pgmap v1513: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:39.608 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:39 vm03.local ceph-mon[50983]: pgmap v1514: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:39 vm00.local ceph-mon[49980]: pgmap v1514: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:40.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:40 vm00.local ceph-mon[49980]: pgmap v1515: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:40.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:40 vm03.local ceph-mon[50983]: pgmap v1515: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:42.056 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:42.056 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:42.081 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:42.081 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:43.483 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:43 vm00.local ceph-mon[49980]: pgmap v1516: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:43 vm03.local ceph-mon[50983]: pgmap v1516: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:45 vm00.local ceph-mon[49980]: pgmap v1517: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:45 vm03.local ceph-mon[50983]: pgmap v1517: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:47.082 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:47.083 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:47.112 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:47.112 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:47 vm00.local ceph-mon[49980]: pgmap v1518: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:47 vm03.local ceph-mon[50983]: pgmap v1518: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:49.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:49 vm00.local ceph-mon[49980]: pgmap v1519: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:49.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:49 vm03.local ceph-mon[50983]: pgmap v1519: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:51 vm00.local ceph-mon[49980]: pgmap v1520: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:51 vm03.local ceph-mon[50983]: pgmap v1520: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:52.114 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:52.114 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:52.140 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:52.141 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:53 vm00.local ceph-mon[49980]: pgmap v1521: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:53 vm03.local ceph-mon[50983]: pgmap v1521: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:55.519 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:55 vm03.local ceph-mon[50983]: pgmap v1522: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:55.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:55 vm00.local ceph-mon[49980]: pgmap v1522: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:56.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:05:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:05:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:05:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:05:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:05:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:05:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:05:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:05:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:05:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:05:57.142 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:05:57.143 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:05:57.168 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:05:57.169 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:05:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:57 vm00.local ceph-mon[49980]: pgmap v1523: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:57 vm03.local ceph-mon[50983]: pgmap v1523: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:05:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:05:59 vm00.local ceph-mon[49980]: pgmap v1524: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:05:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:05:59 vm03.local ceph-mon[50983]: pgmap v1524: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:01 vm00.local ceph-mon[49980]: pgmap v1525: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:01 vm03.local ceph-mon[50983]: pgmap v1525: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:02.170 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:02.171 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:02.201 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:02.202 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:03 vm00.local ceph-mon[49980]: pgmap v1526: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:03 vm03.local ceph-mon[50983]: pgmap v1526: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:05 vm00.local ceph-mon[49980]: pgmap v1527: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:05 vm03.local ceph-mon[50983]: pgmap v1527: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:07.203 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:07.204 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:07.231 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:07.232 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:07 vm00.local ceph-mon[49980]: pgmap v1528: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:07 vm03.local ceph-mon[50983]: pgmap v1528: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:09 vm00.local ceph-mon[49980]: pgmap v1529: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
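[editor's note] The minute-aligned bursts of mon records above (config dump, config generate-minimal-conf, and auth get client.admin at 06:04:56, 06:05:56, 06:06:56, ...) are the active mgr dispatching the same three commands on each refresh cycle. For orientation, the equivalent commands issued by hand from a cephadm shell would look like this (illustrative only; not part of the job definition):

    # The three mon commands the mgr dispatches once a minute above.
    ceph config dump --format json
    ceph config generate-minimal-conf
    ceph auth get client.admin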
2026-03-10T06:06:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:09 vm03.local ceph-mon[50983]: pgmap v1529: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:11 vm00.local ceph-mon[49980]: pgmap v1530: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:11 vm03.local ceph-mon[50983]: pgmap v1530: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:12.233 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:12.234 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:12.266 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:12.267 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:13 vm00.local ceph-mon[49980]: pgmap v1531: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:13 vm03.local ceph-mon[50983]: pgmap v1531: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:15.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:15 vm00.local ceph-mon[49980]: pgmap v1532: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:15 vm03.local ceph-mon[50983]: pgmap v1532: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:17.269 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:17.270 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:17.296 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:17.296 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:17 vm00.local ceph-mon[49980]: pgmap v1533: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:17 vm03.local ceph-mon[50983]: pgmap v1533: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:19 vm00.local ceph-mon[49980]: pgmap v1534: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:19 vm03.local ceph-mon[50983]: pgmap v1534: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:21 vm00.local ceph-mon[49980]: pgmap v1535: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:21 vm03.local ceph-mon[50983]: pgmap v1535: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:22.297 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:22.298 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:22.324 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:22.324 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:23 vm00.local ceph-mon[49980]: pgmap v1536: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:23 vm03.local ceph-mon[50983]: pgmap v1536: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:25.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:25 vm00.local ceph-mon[49980]: pgmap v1537: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:25 vm03.local ceph-mon[50983]: pgmap v1537: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:27.326 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:27.326 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:27.355 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:27.356 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:27 vm00.local ceph-mon[49980]: pgmap v1538: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:27 vm03.local ceph-mon[50983]: pgmap v1538: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:29 vm00.local ceph-mon[49980]: pgmap v1539: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:29 vm03.local ceph-mon[50983]: pgmap v1539: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:31 vm00.local ceph-mon[49980]: pgmap v1540: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:31 vm03.local ceph-mon[50983]: pgmap v1540: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:32.357 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:32.358 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:32.383 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:32.384 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:33.747 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:33 vm00.local ceph-mon[49980]: pgmap v1541: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:33 vm03.local ceph-mon[50983]: pgmap v1541: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:34.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:34 vm00.local ceph-mon[49980]: pgmap v1542: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:34.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:34 vm03.local ceph-mon[50983]: pgmap v1542: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:06:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:06:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:06:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:06:37.385 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:37.386 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:37.504 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:37.505 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:37 vm00.local ceph-mon[49980]: pgmap v1543: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:37 vm03.local ceph-mon[50983]: pgmap v1543: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:38.864 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:38 vm00.local ceph-mon[49980]: pgmap v1544: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:38 vm03.local ceph-mon[50983]: pgmap v1544: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:40 vm00.local ceph-mon[49980]: pgmap v1545: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:06:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:40 vm03.local ceph-mon[50983]: pgmap v1545: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:06:42.506 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:42.506 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:42.532 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:42.532 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:43.748 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:43 vm00.local ceph-mon[49980]: pgmap v1546: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:06:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:43 vm03.local ceph-mon[50983]: pgmap v1546: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:06:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:45 vm00.local ceph-mon[49980]: pgmap v1547: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:45 vm03.local ceph-mon[50983]: pgmap v1547: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:47.534 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:47.534 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:47.561 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:47.562 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:47 vm00.local ceph-mon[49980]: pgmap v1548: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:06:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:47 vm03.local ceph-mon[50983]: pgmap v1548: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:06:48.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:48 vm00.local ceph-mon[49980]: pgmap v1549: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:48.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:48 vm03.local ceph-mon[50983]: pgmap v1549: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:50 vm00.local ceph-mon[49980]: pgmap v1550: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:50 vm03.local ceph-mon[50983]: pgmap v1550: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:52.563 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:52.564 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:52.592 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:52.593 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:53 vm00.local ceph-mon[49980]: pgmap v1551: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:53 vm03.local ceph-mon[50983]: pgmap v1551: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:55 vm03.local ceph-mon[50983]: pgmap v1552: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:55.763 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:55 vm00.local ceph-mon[49980]: pgmap v1552: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:06:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:06:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:06:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:06:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:06:56.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:06:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:06:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:06:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:06:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:06:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:06:56.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:06:57.594 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:06:57.594 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:06:57.620 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:06:57.621 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:06:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:57 vm00.local ceph-mon[49980]: pgmap v1553: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:57 vm03.local ceph-mon[50983]: pgmap v1553: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:06:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:06:59 vm00.local ceph-mon[49980]: pgmap v1554: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:06:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:06:59 vm03.local ceph-mon[50983]: pgmap v1554: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:01 vm00.local ceph-mon[49980]: pgmap v1555: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:01 vm03.local ceph-mon[50983]: pgmap v1555: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:02.622 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:02.623 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:02.648 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:02.649 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:03 vm00.local ceph-mon[49980]: pgmap v1556: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:03 vm03.local ceph-mon[50983]: pgmap v1556: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:05 vm00.local ceph-mon[49980]: pgmap v1557: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:05 vm03.local ceph-mon[50983]: pgmap v1557: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:07.650 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:07.651 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:07.678 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:07.679 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:07 vm00.local ceph-mon[49980]: pgmap v1558: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:07 vm03.local ceph-mon[50983]: pgmap v1558: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:09.608 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:09 vm03.local ceph-mon[50983]: pgmap v1559: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:09 vm00.local ceph-mon[49980]: pgmap v1559: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:11 vm00.local ceph-mon[49980]: pgmap v1560: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:11 vm03.local ceph-mon[50983]: pgmap v1560: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:12.680 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:12.681 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:12.706 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:12.707 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:13.751 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:13 vm00.local ceph-mon[49980]: pgmap v1561: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:13 vm03.local ceph-mon[50983]: pgmap v1561: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:14.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:14 vm00.local ceph-mon[49980]: pgmap v1562: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:14 vm03.local ceph-mon[50983]: pgmap v1562: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:17.708 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:17.709 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:17.733 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:17.734 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:17 vm00.local ceph-mon[49980]: pgmap v1563: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:17 vm03.local ceph-mon[50983]: pgmap v1563: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:19 vm00.local ceph-mon[49980]: pgmap v1564: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:19 vm03.local ceph-mon[50983]: pgmap v1564: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:21 vm00.local ceph-mon[49980]: pgmap v1565: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:21 vm03.local ceph-mon[50983]: pgmap v1565: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:22.735 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:22.736 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:22.760 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:22.761 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:23.752 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:23 vm00.local ceph-mon[49980]: pgmap v1566: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:23 vm03.local ceph-mon[50983]: pgmap v1566: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:25 vm00.local ceph-mon[49980]: pgmap v1567: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:25 vm03.local ceph-mon[50983]: pgmap v1567: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:27.762 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:27.763 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:27 vm00.local ceph-mon[49980]: pgmap v1568: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:27.788 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:27.788 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:27 vm03.local ceph-mon[50983]: pgmap v1568: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:28.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:28 vm00.local ceph-mon[49980]: pgmap v1569: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:28.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:28 vm03.local ceph-mon[50983]: pgmap v1569: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:30 vm00.local ceph-mon[49980]: pgmap v1570: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:30 vm03.local ceph-mon[50983]: pgmap v1570: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:32.790 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:32.790 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:32.960 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:32.960 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:33.753 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:33 vm00.local ceph-mon[49980]: pgmap v1571: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:33 vm03.local ceph-mon[50983]: pgmap v1571: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:35 vm00.local ceph-mon[49980]: pgmap v1572: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:07:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:07:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:35 vm03.local ceph-mon[50983]: pgmap v1572: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:07:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:07:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:37 vm00.local ceph-mon[49980]: pgmap v1573: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:37 vm03.local ceph-mon[50983]: pgmap v1573: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:37.961 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:37.962 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:37.989 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:37.989 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:39.608 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:39 vm03.local ceph-mon[50983]: pgmap v1574: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:39 vm00.local ceph-mon[49980]: pgmap v1574: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:41 vm00.local ceph-mon[49980]: pgmap v1575: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:41 vm03.local ceph-mon[50983]: pgmap v1575: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:42.990 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:42.991 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:43.020 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:43.020 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:43.754 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:43 vm00.local ceph-mon[49980]: pgmap v1576: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:43 vm03.local ceph-mon[50983]: pgmap v1576: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:44.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:44 vm00.local ceph-mon[49980]: pgmap v1577: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:44 vm03.local ceph-mon[50983]: pgmap v1577: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:07:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:47 vm00.local ceph-mon[49980]: pgmap v1578: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:47 vm03.local ceph-mon[50983]: pgmap v1578: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:07:48.021 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:07:48.022 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:07:48.048 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:07:48.049 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:07:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:49 vm00.local ceph-mon[49980]: pgmap v1579: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
rd, 255 B/s wr, 0 op/s 2026-03-10T06:07:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:49 vm03.local ceph-mon[50983]: pgmap v1579: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:07:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:51 vm00.local ceph-mon[49980]: pgmap v1580: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:07:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:51 vm03.local ceph-mon[50983]: pgmap v1580: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:07:53.050 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:07:53.051 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:07:53.139 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:07:53.140 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:07:53.755 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:53 vm00.local ceph-mon[49980]: pgmap v1581: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:07:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:53 vm03.local ceph-mon[50983]: pgmap v1581: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:07:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:55 vm00.local ceph-mon[49980]: pgmap v1582: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:07:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:55 vm03.local ceph-mon[50983]: pgmap v1582: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:07:56.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:07:56.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:07:56.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:56 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:07:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:07:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:07:56.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:56 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:07:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:57 
vm00.local ceph-mon[49980]: pgmap v1583: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:07:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:07:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:07:57.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:07:57.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:07:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:57 vm03.local ceph-mon[50983]: pgmap v1583: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:07:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:07:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:07:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:07:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:07:58.141 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:07:58.141 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:07:58.216 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:07:58.216 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:07:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:07:58 vm00.local ceph-mon[49980]: pgmap v1584: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:07:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:07:58 vm03.local ceph-mon[50983]: pgmap v1584: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:00 vm00.local ceph-mon[49980]: pgmap v1585: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:00 vm03.local ceph-mon[50983]: pgmap v1585: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:03.218 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:03.218 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:03.245 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:03.245 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:03.756 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:03 vm00.local ceph-mon[49980]: pgmap v1586: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:03 vm03.local ceph-mon[50983]: pgmap v1586: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:05 vm00.local ceph-mon[49980]: pgmap v1587: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:05 vm03.local ceph-mon[50983]: pgmap v1587: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:07 vm00.local ceph-mon[49980]: pgmap v1588: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:07 vm03.local ceph-mon[50983]: pgmap v1588: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:08.247 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:08.247 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:08.273 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:08.274 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:09 vm00.local ceph-mon[49980]: pgmap v1589: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:09 vm03.local ceph-mon[50983]: pgmap v1589: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:10.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:10 vm00.local ceph-mon[49980]: pgmap v1590: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:10.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:10 vm03.local ceph-mon[50983]: pgmap v1590: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:13.275 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:13.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:13.303 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:13.304 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:13.625 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:13 vm00.local ceph-mon[49980]: pgmap v1591: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:13 vm03.local ceph-mon[50983]: pgmap 
v1591: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:15 vm00.local ceph-mon[49980]: pgmap v1592: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:15 vm03.local ceph-mon[50983]: pgmap v1592: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:17 vm00.local ceph-mon[49980]: pgmap v1593: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:17 vm03.local ceph-mon[50983]: pgmap v1593: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:18.306 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:18.307 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:18.334 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:18.334 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:19 vm00.local ceph-mon[49980]: pgmap v1594: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:19 vm03.local ceph-mon[50983]: pgmap v1594: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:20.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:20 vm00.local ceph-mon[49980]: pgmap v1595: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:20.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:20 vm03.local ceph-mon[50983]: pgmap v1595: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:23.336 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:23.336 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:23.366 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:23.367 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:23.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:23 vm00.local ceph-mon[49980]: pgmap v1596: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:23 vm03.local ceph-mon[50983]: pgmap v1596: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:24.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:24 vm00.local ceph-mon[49980]: pgmap v1597: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:24 vm03.local ceph-mon[50983]: pgmap v1597: 97 pgs: 97 active+clean; 453 KiB 
data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:27 vm00.local ceph-mon[49980]: pgmap v1598: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:27 vm03.local ceph-mon[50983]: pgmap v1598: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:28.368 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:28.369 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:28.402 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:28.402 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:29 vm00.local ceph-mon[49980]: pgmap v1599: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:29 vm03.local ceph-mon[50983]: pgmap v1599: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:31 vm00.local ceph-mon[49980]: pgmap v1600: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:31 vm03.local ceph-mon[50983]: pgmap v1600: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:32.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:32 vm00.local ceph-mon[49980]: pgmap v1601: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:32.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:32 vm03.local ceph-mon[50983]: pgmap v1601: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:33.404 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:33.404 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:33.430 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:33.431 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:34 vm00.local ceph-mon[49980]: pgmap v1602: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:34 vm03.local ceph-mon[50983]: pgmap v1602: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:08:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:35 vm00.local ceph-mon[49980]: from='mgr.14214 
192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:08:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:08:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:08:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:37 vm00.local ceph-mon[49980]: pgmap v1603: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:37 vm03.local ceph-mon[50983]: pgmap v1603: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:38.432 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:38.433 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:38.458 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:38.459 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:39 vm00.local ceph-mon[49980]: pgmap v1604: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:39 vm03.local ceph-mon[50983]: pgmap v1604: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:41 vm00.local ceph-mon[49980]: pgmap v1605: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:41 vm03.local ceph-mon[50983]: pgmap v1605: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:43.460 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:43.461 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:43.486 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:43.486 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:43.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:43 vm00.local ceph-mon[49980]: pgmap v1606: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:43 vm03.local ceph-mon[50983]: pgmap v1606: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:44.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:44 vm00.local ceph-mon[49980]: pgmap v1607: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
255 B/s wr, 0 op/s 2026-03-10T06:08:44.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:44 vm03.local ceph-mon[50983]: pgmap v1607: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:47 vm00.local ceph-mon[49980]: pgmap v1608: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:47 vm03.local ceph-mon[50983]: pgmap v1608: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:48.487 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:48.488 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:48.515 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:48.516 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:49 vm00.local ceph-mon[49980]: pgmap v1609: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:49 vm03.local ceph-mon[50983]: pgmap v1609: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:51 vm00.local ceph-mon[49980]: pgmap v1610: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:51 vm03.local ceph-mon[50983]: pgmap v1610: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:52.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:52 vm00.local ceph-mon[49980]: pgmap v1611: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:52.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:52 vm03.local ceph-mon[50983]: pgmap v1611: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:08:53.517 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:08:53.518 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:08:53.543 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:08:53.544 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:08:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:54 vm00.local ceph-mon[49980]: pgmap v1612: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:54 vm03.local ceph-mon[50983]: pgmap v1612: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:08:57.523 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:57 vm00.local ceph-mon[49980]: pgmap v1613: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
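By this point the same four-line mount retry trace and the twin per-mon pgmap echoes have been repeating for over a minute with no state change. When scanning a window like this, a summary can be pulled straight from the log file; a small hypothetical post-processing sketch (the file name teuthology.log is an assumption, and the grep patterns are the literal strings that appear above):

# Count failed NFS mount attempts in the captured window.
grep -c 'mount.nfs: mount system call failed' teuthology.log
# First and last pgmap versions seen, to bound the idle period.
grep -o 'pgmap v[0-9]*' teuthology.log | head -1
grep -o 'pgmap v[0-9]*' teuthology.log | tail -1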
2026-03-10T06:08:57.523 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:08:57.523 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:08:57.523 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:57 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:08:57.695 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:57 vm03.local ceph-mon[50983]: pgmap v1613: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:08:57.695 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:08:57.695 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:08:57.695 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:57 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:08:58.545 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:08:58.546 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:08:58.573 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:08:58.574 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:08:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:08:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:58 vm00.local ceph-mon[49980]: pgmap v1614: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:08:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:08:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:08:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:58 vm03.local ceph-mon[50983]: pgmap v1614: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:08:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:08:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:08:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:09:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:00 vm00.local ceph-mon[49980]: pgmap v1615: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:00 vm03.local ceph-mon[50983]: pgmap v1615: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:03.576 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:03.576 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:03.601 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:03.602 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:03.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:03 vm00.local ceph-mon[49980]: pgmap v1616: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:03 vm03.local ceph-mon[50983]: pgmap v1616: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:05 vm00.local ceph-mon[49980]: pgmap v1617: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:05 vm03.local ceph-mon[50983]: pgmap v1617: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:07 vm00.local ceph-mon[49980]: pgmap v1618: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:07 vm03.local ceph-mon[50983]: pgmap v1618: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:08.603 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:08.604 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:08.629 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:08.630 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:09 vm00.local ceph-mon[49980]: pgmap v1619: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:09 vm03.local ceph-mon[50983]: pgmap v1619: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:10.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:10 vm00.local ceph-mon[49980]: pgmap v1620: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:10.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:10 vm03.local ceph-mon[50983]: pgmap v1620: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:13.631 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:13.632 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:13.639 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:13 vm00.local ceph-mon[49980]: pgmap v1621: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:13.658 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:13.659 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:13 vm03.local ceph-mon[50983]: pgmap v1621: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:15 vm00.local ceph-mon[49980]: pgmap v1622: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:15 vm03.local ceph-mon[50983]: pgmap v1622: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:17 vm00.local ceph-mon[49980]: pgmap v1623: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:17 vm03.local ceph-mon[50983]: pgmap v1623: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:18.660 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:18.660 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:18.687 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:18.688 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:18.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:18 vm00.local ceph-mon[49980]: pgmap v1624: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:18.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:18 vm03.local ceph-mon[50983]: pgmap v1624: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:20 vm00.local ceph-mon[49980]: pgmap v1625: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:20 vm03.local ceph-mon[50983]: pgmap v1625: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:23.689 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:23.689 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:23.715 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:23.716 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:23.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:23 vm00.local ceph-mon[49980]: pgmap v1626: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:23 vm03.local ceph-mon[50983]: pgmap v1626: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:24.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:24 vm00.local ceph-mon[49980]: pgmap v1627: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:24.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:24 vm03.local ceph-mon[50983]: pgmap v1627: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:27 vm00.local ceph-mon[49980]: pgmap v1628: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:27 vm03.local ceph-mon[50983]: pgmap v1628: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:28.717 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:28.718 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:28.744 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:28.745 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:29 vm00.local ceph-mon[49980]: pgmap v1629: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:29 vm03.local ceph-mon[50983]: pgmap v1629: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:31 vm00.local ceph-mon[49980]: pgmap v1630: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:31 vm03.local ceph-mon[50983]: pgmap v1630: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:32.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:32 vm00.local ceph-mon[49980]: pgmap v1631: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:32.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:32 vm03.local ceph-mon[50983]: pgmap v1631: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:33.746 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:33.747 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:33.773 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:33.774 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:34 vm00.local ceph-mon[49980]: pgmap v1632: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:34 vm03.local ceph-mon[50983]: pgmap v1632: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:09:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:09:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:09:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:09:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:37 vm00.local ceph-mon[49980]: pgmap v1633: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:37 vm03.local ceph-mon[50983]: pgmap v1633: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:38.776 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:38.776 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:38.802 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:38.803 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:39 vm00.local ceph-mon[49980]: pgmap v1634: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:39 vm03.local ceph-mon[50983]: pgmap v1634: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:40.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:40 vm00.local ceph-mon[49980]: pgmap v1635: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:40.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:40 vm03.local ceph-mon[50983]: pgmap v1635: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:42 vm00.local ceph-mon[49980]: pgmap v1636: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:42 vm03.local ceph-mon[50983]: pgmap v1636: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:43.805 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:43.805 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:43.831 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:43.831 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:44 vm00.local ceph-mon[49980]: pgmap v1637: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:44 vm03.local ceph-mon[50983]: pgmap v1637: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:47.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:46 vm00.local ceph-mon[49980]: pgmap v1638: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:46 vm03.local ceph-mon[50983]: pgmap v1638: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:48.833 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:48.833 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:48.861 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:48.861 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:48 vm03.local ceph-mon[50983]: pgmap v1639: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:48 vm00.local ceph-mon[49980]: pgmap v1639: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:50 vm03.local ceph-mon[50983]: pgmap v1640: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:50 vm00.local ceph-mon[49980]: pgmap v1640: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:52.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:52 vm00.local ceph-mon[49980]: pgmap v1641: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:52.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:52 vm03.local ceph-mon[50983]: pgmap v1641: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:53.863 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:53.863 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:53.889 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:53.889 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:54 vm00.local ceph-mon[49980]: pgmap v1642: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:54 vm03.local ceph-mon[50983]: pgmap v1642: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:56 vm03.local ceph-mon[50983]: pgmap v1643: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:56 vm00.local ceph-mon[49980]: pgmap v1643: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:09:58.891 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:09:58.891 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:09:58.918 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:09:58.918 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:09:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:58 vm03.local ceph-mon[50983]: pgmap v1644: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:09:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:09:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:09:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:09:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:09:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:09:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:58 vm00.local ceph-mon[49980]: pgmap v1644: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:09:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:09:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:09:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:09:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:09:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:09:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:10:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:00 vm03.local ceph-mon[50983]: pgmap v1645: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:00 vm03.local ceph-mon[50983]: overall HEALTH_OK
2026-03-10T06:10:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:00 vm00.local ceph-mon[49980]: pgmap v1645: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:00 vm00.local ceph-mon[49980]: overall HEALTH_OK
2026-03-10T06:10:02.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:02 vm00.local ceph-mon[49980]: pgmap v1646: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:02.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:02 vm03.local ceph-mon[50983]: pgmap v1646: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:03.920 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:03.920 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:03.946 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:03.947 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:04 vm03.local ceph-mon[50983]: pgmap v1647: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:04 vm00.local ceph-mon[49980]: pgmap v1647: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:06 vm03.local ceph-mon[50983]: pgmap v1648: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:06 vm00.local ceph-mon[49980]: pgmap v1648: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:08.948 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:08.949 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:08.975 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:08.976 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:08 vm03.local ceph-mon[50983]: pgmap v1649: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:08 vm00.local ceph-mon[49980]: pgmap v1649: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:10 vm00.local ceph-mon[49980]: pgmap v1650: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:10 vm03.local ceph-mon[50983]: pgmap v1650: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:12.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:12 vm00.local ceph-mon[49980]: pgmap v1651: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:12.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:12 vm03.local ceph-mon[50983]: pgmap v1651: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:13.977 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:13.978 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:14.004 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:14.005 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:14 vm03.local ceph-mon[50983]: pgmap v1652: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:14 vm00.local ceph-mon[49980]: pgmap v1652: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:17.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:16 vm03.local ceph-mon[50983]: pgmap v1653: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:16 vm00.local ceph-mon[49980]: pgmap v1653: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:18.886 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:18 vm00.local ceph-mon[49980]: pgmap v1654: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:19.006 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:19.007 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:19.033 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:19.034 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:18 vm03.local ceph-mon[50983]: pgmap v1654: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:20 vm03.local ceph-mon[50983]: pgmap v1655: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:20 vm00.local ceph-mon[49980]: pgmap v1655: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:22.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:22 vm00.local ceph-mon[49980]: pgmap v1656: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:22 vm03.local ceph-mon[50983]: pgmap v1656: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:24.035 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:24.035 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:24.061 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:24.061 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:24 vm03.local ceph-mon[50983]: pgmap v1657: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:24 vm00.local ceph-mon[49980]: pgmap v1657: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:27.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:26 vm03.local ceph-mon[50983]: pgmap v1658: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:26 vm00.local ceph-mon[49980]: pgmap v1658: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:28 vm03.local ceph-mon[50983]: pgmap v1659: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:29.062 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:29.063 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:29.088 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:29.089 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:28 vm00.local ceph-mon[49980]: pgmap v1659: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:30 vm03.local ceph-mon[50983]: pgmap v1660: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:30 vm00.local ceph-mon[49980]: pgmap v1660: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:32.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:32 vm00.local ceph-mon[49980]: pgmap v1661: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:32.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:32 vm03.local ceph-mon[50983]: pgmap v1661: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:34.090 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:34.091 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:34.117 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:34.117 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:34 vm03.local ceph-mon[50983]: pgmap v1662: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:34 vm00.local ceph-mon[49980]: pgmap v1662: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:10:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:10:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:10:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:10:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:36 vm03.local ceph-mon[50983]: pgmap v1663: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:36 vm00.local ceph-mon[49980]: pgmap v1663: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:38 vm03.local ceph-mon[50983]: pgmap v1664: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:39.118 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:39.119 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:39.145 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:39.145 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:38 vm00.local ceph-mon[49980]: pgmap v1664: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:40 vm03.local ceph-mon[50983]: pgmap v1665: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:40 vm00.local ceph-mon[49980]: pgmap v1665: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:42 vm00.local ceph-mon[49980]: pgmap v1666: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:42 vm03.local ceph-mon[50983]: pgmap v1666: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:44.146 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:44.147 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:44.176 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:10:44.176 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:10:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:44 vm03.local ceph-mon[50983]: pgmap v1667: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:44 vm00.local ceph-mon[49980]: pgmap v1667: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:47.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:46 vm03.local ceph-mon[50983]: pgmap v1668: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:46 vm00.local ceph-mon[49980]: pgmap v1668: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:10:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:48 vm03.local ceph-mon[50983]: pgmap v1669: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:10:49.178 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:10:49.178 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:10:49.204
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:10:49.204 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:10:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:48 vm00.local ceph-mon[49980]: pgmap v1669: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:10:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:50 vm00.local ceph-mon[49980]: pgmap v1670: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:10:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:50 vm03.local ceph-mon[50983]: pgmap v1670: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:10:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:52 vm00.local ceph-mon[49980]: pgmap v1671: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:10:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:52 vm03.local ceph-mon[50983]: pgmap v1671: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:10:54.206 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:10:54.206 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:10:54.232 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:10:54.232 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:10:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:54 vm03.local ceph-mon[50983]: pgmap v1672: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:10:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:54 vm00.local ceph-mon[49980]: pgmap v1672: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:10:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:56 vm03.local ceph-mon[50983]: pgmap v1673: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:10:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:56 vm00.local ceph-mon[49980]: pgmap v1673: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:10:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:58 vm03.local ceph-mon[50983]: pgmap v1674: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:10:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:10:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:10:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-10T06:10:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:10:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:10:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:10:59.234 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:10:59.234 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:10:59.263 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:10:59.264 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:10:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:58 vm00.local ceph-mon[49980]: pgmap v1674: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:10:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:10:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:10:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:10:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:10:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:10:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:11:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:00 vm00.local ceph-mon[49980]: pgmap v1675: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:00 vm03.local ceph-mon[50983]: pgmap v1675: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:02 vm00.local ceph-mon[49980]: pgmap v1676: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:02 vm03.local ceph-mon[50983]: pgmap v1676: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:04.265 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:04.265 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:04.293 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:04.294 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:04 vm03.local ceph-mon[50983]: pgmap v1677: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:04 vm00.local ceph-mon[49980]: pgmap v1677: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:06 vm03.local ceph-mon[50983]: pgmap v1678: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:06 vm00.local ceph-mon[49980]: pgmap v1678: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:08 vm03.local ceph-mon[50983]: pgmap v1679: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:08 vm00.local ceph-mon[49980]: pgmap v1679: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:09.295 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:09.296 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:09.321 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:09.322 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:10 vm00.local ceph-mon[49980]: pgmap v1680: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:10 vm03.local ceph-mon[50983]: pgmap v1680: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:12 vm00.local ceph-mon[49980]: pgmap v1681: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:12 vm03.local ceph-mon[50983]: pgmap v1681: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:14.323 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:14.324 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:14.350 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:14.350 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:14 vm03.local ceph-mon[50983]: pgmap v1682: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:14 vm00.local ceph-mon[49980]: pgmap v1682: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:17.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:16 vm03.local ceph-mon[50983]: pgmap v1683: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T06:11:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:16 vm00.local ceph-mon[49980]: pgmap v1683: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:18 vm00.local ceph-mon[49980]: pgmap v1684: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:18 vm03.local ceph-mon[50983]: pgmap v1684: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:19.352 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:19.353 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:19.381 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:19.381 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:20 vm00.local ceph-mon[49980]: pgmap v1685: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:20 vm03.local ceph-mon[50983]: pgmap v1685: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:22 vm00.local ceph-mon[49980]: pgmap v1686: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:22 vm03.local ceph-mon[50983]: pgmap v1686: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:24.383 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:24.383 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:24.410 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:24.411 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:24 vm03.local ceph-mon[50983]: pgmap v1687: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:24 vm00.local ceph-mon[49980]: pgmap v1687: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:26 vm00.local ceph-mon[49980]: pgmap v1688: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:26 vm03.local ceph-mon[50983]: pgmap v1688: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:28 vm00.local ceph-mon[49980]: pgmap v1689: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:29.306 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:28 vm03.local ceph-mon[50983]: pgmap v1689: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:29.412 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:29.412 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:29.437 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:29.438 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:30 vm00.local ceph-mon[49980]: pgmap v1690: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:30 vm03.local ceph-mon[50983]: pgmap v1690: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:32 vm00.local ceph-mon[49980]: pgmap v1691: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:32 vm03.local ceph-mon[50983]: pgmap v1691: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:34.439 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:34.440 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:34.466 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:34.466 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:34 vm03.local ceph-mon[50983]: pgmap v1692: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:34 vm00.local ceph-mon[49980]: pgmap v1692: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:11:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:11:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:11:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:11:37.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:36 vm00.local ceph-mon[49980]: pgmap v1693: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:36 vm03.local ceph-mon[50983]: pgmap v1693: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:39.108 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:38 vm03.local ceph-mon[50983]: pgmap v1694: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:38 vm00.local ceph-mon[49980]: pgmap v1694: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:39.468 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:39.468 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:39.495 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:39.495 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:40 vm00.local ceph-mon[49980]: pgmap v1695: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:40 vm03.local ceph-mon[50983]: pgmap v1695: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:42 vm00.local ceph-mon[49980]: pgmap v1696: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:42 vm03.local ceph-mon[50983]: pgmap v1696: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:44.497 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:44.497 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:44.522 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:44.523 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:44 vm00.local ceph-mon[49980]: pgmap v1697: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:44 vm03.local ceph-mon[50983]: pgmap v1697: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:46 vm00.local ceph-mon[49980]: pgmap v1698: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:46 vm03.local ceph-mon[50983]: pgmap v1698: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:49.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:48 vm00.local ceph-mon[49980]: pgmap v1699: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:48 vm03.local ceph-mon[50983]: pgmap v1699: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:49.524 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:49.525 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:49.551 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:49.552 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:50 vm00.local ceph-mon[49980]: pgmap v1700: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:50 vm03.local ceph-mon[50983]: pgmap v1700: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:52 vm00.local ceph-mon[49980]: pgmap v1701: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:52 vm03.local ceph-mon[50983]: pgmap v1701: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:54.553 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:54.554 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:54.579 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:54.580 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:11:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:54 vm00.local ceph-mon[49980]: pgmap v1702: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:54 vm03.local ceph-mon[50983]: pgmap v1702: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:56 vm00.local ceph-mon[49980]: pgmap v1703: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:56 vm03.local ceph-mon[50983]: pgmap v1703: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:11:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:58 vm00.local ceph-mon[49980]: pgmap v1704: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:11:59.030 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:11:59.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:58 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:11:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:58 vm03.local ceph-mon[50983]: pgmap v1704: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:11:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:11:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:11:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:58 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:11:59.581 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:11:59.582 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:11:59.608 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:11:59.608 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:12:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:11:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:12:00.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:59 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:12:00.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:11:59 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:12:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:00 vm00.local ceph-mon[49980]: pgmap v1705: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:00 vm03.local ceph-mon[50983]: pgmap v1705: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:02 vm00.local ceph-mon[49980]: pgmap v1706: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:02 vm03.local ceph-mon[50983]: pgmap v1706: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:04.610 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:04.610 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:04.637 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:04.637 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:04 vm00.local ceph-mon[49980]: pgmap v1707: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:04 vm03.local ceph-mon[50983]: pgmap v1707: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:06 vm00.local ceph-mon[49980]: pgmap v1708: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:06 vm03.local ceph-mon[50983]: pgmap v1708: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:09.108 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:08 vm03.local ceph-mon[50983]: pgmap v1709: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:08 vm00.local ceph-mon[49980]: pgmap v1709: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:09.638 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:09.639 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:09.664 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:09.665 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:10 vm00.local ceph-mon[49980]: pgmap v1710: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:10 vm03.local ceph-mon[50983]: pgmap v1710: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:13.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:12 vm00.local ceph-mon[49980]: pgmap v1711: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:12 vm03.local ceph-mon[50983]: pgmap v1711: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:14.666 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:14.667 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:14.694 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:14.694 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:14 vm00.local ceph-mon[49980]: pgmap v1712: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-10T06:12:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:14 vm03.local ceph-mon[50983]: pgmap v1712: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:16 vm00.local ceph-mon[49980]: pgmap v1713: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:16 vm03.local ceph-mon[50983]: pgmap v1713: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:18 vm00.local ceph-mon[49980]: pgmap v1714: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:18 vm03.local ceph-mon[50983]: pgmap v1714: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:19.696 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:19.697 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:19.725 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:19.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:20 vm00.local ceph-mon[49980]: pgmap v1715: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:20 vm03.local ceph-mon[50983]: pgmap v1715: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:22 vm00.local ceph-mon[49980]: pgmap v1716: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:22 vm03.local ceph-mon[50983]: pgmap v1716: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:24.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:24.728 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:24.880 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:24.880 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:24 vm00.local ceph-mon[49980]: pgmap v1717: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:24 vm03.local ceph-mon[50983]: pgmap v1717: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:26 vm00.local ceph-mon[49980]: pgmap v1718: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:27.306 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:26 vm03.local ceph-mon[50983]: pgmap v1718: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:28 vm00.local ceph-mon[49980]: pgmap v1719: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:28 vm03.local ceph-mon[50983]: pgmap v1719: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:29.881 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:29.882 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:29.908 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:29.909 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:30 vm00.local ceph-mon[49980]: pgmap v1720: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:30 vm03.local ceph-mon[50983]: pgmap v1720: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:32 vm00.local ceph-mon[49980]: pgmap v1721: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:32 vm03.local ceph-mon[50983]: pgmap v1721: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:34.910 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:34.911 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:34.940 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:34.941 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:34 vm00.local ceph-mon[49980]: pgmap v1722: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:34 vm03.local ceph-mon[50983]: pgmap v1722: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:12:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:12:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:12:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:12:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:36 vm00.local ceph-mon[49980]: pgmap v1723: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:36 vm03.local ceph-mon[50983]: pgmap v1723: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:39.108 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:38 vm03.local ceph-mon[50983]: pgmap v1724: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:38 vm00.local ceph-mon[49980]: pgmap v1724: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:39.942 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:39.943 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:39.970 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:39.970 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:40 vm00.local ceph-mon[49980]: pgmap v1725: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:40 vm03.local ceph-mon[50983]: pgmap v1725: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:42 vm00.local ceph-mon[49980]: pgmap v1726: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:42 vm03.local ceph-mon[50983]: pgmap v1726: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:44.971 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:44.972 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:44.998 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:44.999 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:44 vm00.local ceph-mon[49980]: pgmap v1727: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:44 vm03.local ceph-mon[50983]: pgmap v1727: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 
06:12:46 vm00.local ceph-mon[49980]: pgmap v1728: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:46 vm03.local ceph-mon[50983]: pgmap v1728: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:48 vm00.local ceph-mon[49980]: pgmap v1729: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:48 vm03.local ceph-mon[50983]: pgmap v1729: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:50.000 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:50.001 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:50.028 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:50.028 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:50 vm00.local ceph-mon[49980]: pgmap v1730: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:50 vm03.local ceph-mon[50983]: pgmap v1730: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:52 vm00.local ceph-mon[49980]: pgmap v1731: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:52 vm03.local ceph-mon[50983]: pgmap v1731: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:55.030 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:12:55.030 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:12:55.056 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:12:55.056 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:12:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:54 vm00.local ceph-mon[49980]: pgmap v1732: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:54 vm03.local ceph-mon[50983]: pgmap v1732: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:12:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:56 vm00.local ceph-mon[49980]: pgmap v1733: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:56 vm03.local ceph-mon[50983]: pgmap v1733: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:12:59.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:12:58 vm03.local ceph-mon[50983]: 
pgmap v1734: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:12:59.223 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:58 vm00.local ceph-mon[49980]: pgmap v1734: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:13:00.058 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:13:00.058 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:13:00.089 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:13:00.090 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:13:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:13:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:13:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:12:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... two further bare audit lines from the same mgr entity follow, and the same config dump / generate-minimal-conf / auth get block is mirrored on ceph-mon.vm03 at 06:13:00.306 ...]
[... pgmap v1735 through v1763 repeat every ~2 s on both mons, unchanged: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; rates fluctuating between 170 and 341 B/s rd, 170 and 255 B/s wr, 0 op/s ...]
2026-03-10T06:13:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:13:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:13:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:13:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[... both config rm dispatches are mirrored on ceph-mon.vm03 at 06:13:36.306 ...]
[... the mount retry cycle (++ hostname; + mount -t nfs vm00.local:/fake /mnt/foo -o sync; mount.nfs: mount system call failed; + sleep 5) repeats every 5 s from 06:13:05 through 06:13:55, failing each time ...]
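The xtrace lines above (++ hostname, + mount -t nfs vm00.local:/fake /mnt/foo -o sync, + sleep 5) are the expanded trace of a shell retry loop. A minimal sketch of that loop, reconstructed from the trace alone (the /fake export and /mnt/foo mount point are as logged; the loop structure is inferred from the repeating trace, not copied from the job definition):

    # Reconstructed from the '+'/'++' xtrace output above: retry the NFS
    # mount every 5 seconds until the mount(2) system call succeeds.
    # 'hostname' expands to vm00.local here, per the '++ hostname' line.
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        sleep 5
    done

Because the failure is swallowed by the loop condition and followed only by a sleep, the loop never times out on its own; an export that stays unserveable shows up in the log exactly like this, as minutes of identical five-second retry cycles.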
[... pgmap v1764 through v1794 continue every ~2 s on both mons, unchanged: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail ...]
[... the mgr config dump / config generate-minimal-conf / auth get dispatch block recurs on both mons at 06:13:59 and 06:14:59, each time followed by the same two bare audit lines; the rbd_support config rm pair (mirror_snapshot_schedule, trash_purge_schedule) recurs on both mons at 06:14:35 ...]
[... the mount retry cycle keeps failing every 5 s from 06:14:00 through 06:14:55; every attempt ends in mount.nfs: mount system call failed ...]
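By this point the mount has been failing for roughly two minutes with no change in the error. When triaging a log like this one, it helps to bound the failure window before reading further; a few hypothetical helpers (the teuthology.log filename is illustrative, standing in for a saved copy of this log):

    # Hypothetical triage commands against a saved copy of this log.
    grep -c 'mount.nfs: mount system call failed' teuthology.log                # total failed retries
    grep -n 'mount.nfs: mount system call failed' teuthology.log | head -n 1    # first failure
    grep -n 'mount.nfs: mount system call failed' teuthology.log | tail -n 1    # most recent failure

If the count keeps growing while the pgmap lines stay 97 active+clean throughout, the cluster itself is healthy and the problem is isolated to the NFS service rather than to the underlying pools.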
/mnt/foo -o sync 2026-03-10T06:15:00.945 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:00.946 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:00 vm00.local ceph-mon[49980]: pgmap v1795: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:00 vm03.local ceph-mon[50983]: pgmap v1795: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:02 vm00.local ceph-mon[49980]: pgmap v1796: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:02 vm03.local ceph-mon[50983]: pgmap v1796: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:04 vm00.local ceph-mon[49980]: pgmap v1797: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:04 vm03.local ceph-mon[50983]: pgmap v1797: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:05.947 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:05.948 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:05.974 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:05.975 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:07.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:06 vm00.local ceph-mon[49980]: pgmap v1798: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:07.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:06 vm03.local ceph-mon[50983]: pgmap v1798: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:09.108 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:08 vm03.local ceph-mon[50983]: pgmap v1799: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:08 vm00.local ceph-mon[49980]: pgmap v1799: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:10.976 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:10.977 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:11.005 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:11.005 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:10 vm00.local ceph-mon[49980]: pgmap v1800: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 
06:15:10 vm03.local ceph-mon[50983]: pgmap v1800: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:13.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:12 vm00.local ceph-mon[49980]: pgmap v1801: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:13.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:12 vm03.local ceph-mon[50983]: pgmap v1801: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:14 vm00.local ceph-mon[49980]: pgmap v1802: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:14 vm03.local ceph-mon[50983]: pgmap v1802: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:16.006 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:16.007 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:16.035 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:16.036 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:16 vm00.local ceph-mon[49980]: pgmap v1803: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:16 vm03.local ceph-mon[50983]: pgmap v1803: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:18 vm00.local ceph-mon[49980]: pgmap v1804: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:18 vm03.local ceph-mon[50983]: pgmap v1804: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:21.037 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:21.038 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:21.063 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:21.064 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:20 vm00.local ceph-mon[49980]: pgmap v1805: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:20 vm03.local ceph-mon[50983]: pgmap v1805: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:22 vm00.local ceph-mon[49980]: pgmap v1806: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:22 vm03.local ceph-mon[50983]: 
pgmap v1806: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:24 vm00.local ceph-mon[49980]: pgmap v1807: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:24 vm03.local ceph-mon[50983]: pgmap v1807: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:26.066 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:26.066 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:26.093 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:26.094 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:26 vm00.local ceph-mon[49980]: pgmap v1808: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:26 vm03.local ceph-mon[50983]: pgmap v1808: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:28 vm00.local ceph-mon[49980]: pgmap v1809: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:28 vm03.local ceph-mon[50983]: pgmap v1809: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:31.096 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:31.096 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:31.138 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:31.138 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:30 vm00.local ceph-mon[49980]: pgmap v1810: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:30 vm03.local ceph-mon[50983]: pgmap v1810: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:33.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:32 vm00.local ceph-mon[49980]: pgmap v1811: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:32 vm03.local ceph-mon[50983]: pgmap v1811: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:34 vm00.local ceph-mon[49980]: pgmap v1812: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:34 vm03.local ceph-mon[50983]: pgmap v1812: 97 pgs: 97 active+clean; 
453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:36.139 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:36.140 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:36.166 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:36.166 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:15:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:15:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:15:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:15:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:36 vm00.local ceph-mon[49980]: pgmap v1813: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:36 vm03.local ceph-mon[50983]: pgmap v1813: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:38 vm00.local ceph-mon[49980]: pgmap v1814: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:38 vm03.local ceph-mon[50983]: pgmap v1814: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:41.168 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:41.168 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:41.197 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:41.197 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:40 vm00.local ceph-mon[49980]: pgmap v1815: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:40 vm03.local ceph-mon[50983]: pgmap v1815: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:42 vm00.local ceph-mon[49980]: pgmap v1816: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 
160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:42 vm03.local ceph-mon[50983]: pgmap v1816: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:44 vm00.local ceph-mon[49980]: pgmap v1817: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:44 vm03.local ceph-mon[50983]: pgmap v1817: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:46.199 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:46.199 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:46.225 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:46.226 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:46 vm00.local ceph-mon[49980]: pgmap v1818: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:46 vm03.local ceph-mon[50983]: pgmap v1818: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:49 vm00.local ceph-mon[49980]: pgmap v1819: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:49 vm03.local ceph-mon[50983]: pgmap v1819: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:51.227 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:51.228 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:51.253 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:51.254 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:51 vm00.local ceph-mon[49980]: pgmap v1820: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:51 vm03.local ceph-mon[50983]: pgmap v1820: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:52 vm03.local ceph-mon[50983]: pgmap v1821: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:52 vm00.local ceph-mon[49980]: pgmap v1821: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:54 vm00.local ceph-mon[49980]: pgmap v1822: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 
B/s wr, 0 op/s 2026-03-10T06:15:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:54 vm03.local ceph-mon[50983]: pgmap v1822: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:56.255 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:15:56.256 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:15:56.284 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:15:56.285 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:15:57.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:56 vm00.local ceph-mon[49980]: pgmap v1823: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:57.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:56 vm03.local ceph-mon[50983]: pgmap v1823: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:15:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:58 vm00.local ceph-mon[49980]: pgmap v1824: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:15:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:58 vm03.local ceph-mon[50983]: pgmap v1824: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:16:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:59 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:16:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:59 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:16:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:15:59 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:16:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:16:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:16:00.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:15:59 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:16:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:16:01 vm00.local ceph-mon[49980]: pgmap v1825: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:16:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:16:01 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:16:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:16:01 vm00.local ceph-mon[49980]: from='mgr.14214 
[mount retry loop continues: attempts at 06:16:01, 06:16:06, 06:16:11, 06:16:16, and 06:16:21 all ended with "mount.nfs: mount system call failed" followed by "+ sleep 5"; pgmap heartbeats v1825-v1835 from both mons omitted, cluster steady at 97 pgs active+clean]
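`mount.nfs: mount system call failed` is a generic client-side error: the kernel's mount(2) call was rejected, which typically means nothing is serving the export yet. If one were triaging this by hand on vm00, a few standard checks would narrow it down (a hypothetical sketch, not part of this job's tasks; note an NFSv4-only ganesha backend may not answer rpcbind/MOUNT queries):

    # Hypothetical manual triage on the client; standard tools only.
    dmesg | tail -n 20              # kernel-side reason for the failed mount syscall
    rpcinfo -p vm00.local           # is any NFS service registered? (v3-era check)
    ceph nfs cluster info foo       # where cephadm believes the nfs.foo daemons run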
[attempts at 06:16:26 and 06:16:31 failed identically; pgmap heartbeats v1836-v1842 omitted]
2026-03-10T06:16:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:16:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:16:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:16:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[the same two rbd_support "config rm" dispatches were mirrored by vm03's mon; mount attempts at 06:16:36 and 06:16:41 failed the same way; pgmap v1843-v1845 omitted]
[attempts at 06:16:46, 06:16:51, 06:16:56, and 06:17:01 failed identically; pgmap heartbeats v1846-v1855 omitted]
[06:17:00: the mgr's once-a-minute housekeeping trio (config dump, config generate-minimal-conf, auth get client.admin) was dispatched again and mirrored on both mons, followed by several bare mgr audit lines; pgmap v1856-v1857 omitted; the 06:17:06 attempt failed the same way]
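The `config dump` / `config generate-minimal-conf` / `auth get client.admin` trio recurs roughly once a minute throughout this span. It reads as routine mgr/cephadm housekeeping, refreshing the config and keyring material distributed to hosts, rather than anything tied to the mount failures; that interpretation is an inference from the command names, not stated in the log. The same data can be fetched by hand with the standard CLI:

    # Hand-run equivalents of the prefixes the mgr keeps dispatching above.
    ceph config dump --format json
    ceph config generate-minimal-conf
    ceph auth get client.admin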
[attempts at 06:17:11, 06:17:16, 06:17:21, and 06:17:26 failed identically; pgmap heartbeats v1858-v1869 omitted, cluster still 97 pgs active+clean]
[attempts at 06:17:32, 06:17:37, 06:17:42, 06:17:47, 06:17:52, and 06:17:57 failed identically; at 06:17:35 the mgr repeated the two rbd_support "config rm" dispatches seen at 06:16:35, mirrored on both mons; pgmap heartbeats v1870-v1884 omitted, with used space ticking from 68 MiB to 69 MiB at v1883]
[06:18:01: the mgr housekeeping trio (config dump, config generate-minimal-conf, auth get client.admin) was dispatched again on both mons; the 06:18:02 mount attempt failed identically]
2026-03-10T06:18:03.360 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:18:03 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
[the same dispatch was mirrored by vm03's mon, surrounded by bare mgr audit lines on both; attempts at 06:18:07 and 06:18:12 failed the same way; pgmap heartbeats v1885-v1890 omitted]
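The `config rm` against `osd/host:vm00` for `osd_memory_target` looks like the mgr clearing a per-host memory-target override, consistent with cephadm's OSD memory autotuning bookkeeping (again an inference from the command itself, not stated in the log). Its hand-run equivalent would be:

    # Equivalent of the dispatched command: drop any per-host
    # osd_memory_target override for OSDs placed on vm00.
    ceph config rm osd/host:vm00 osd_memory_target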
[attempts at 06:18:17, 06:18:22, 06:18:27, and 06:18:32 failed identically; pgmap heartbeats v1891-v1902 omitted, cluster steady at 97 pgs active+clean, 69 MiB used]
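By this point the loop has been failing for roughly three minutes while the cluster sits at 97 pgs active+clean, so the mount failure is not a cluster-health problem. When skimming a log this repetitive, counting the retries directly is quicker than reading them (a sketch; `teuthology.log` is a placeholder for this job's log file):

    # Count the failed mount attempts and show the first and last occurrences.
    grep -c 'mount.nfs: mount system call failed' teuthology.log
    grep -n 'mount.nfs: mount system call failed' teuthology.log | sed -n '1p;$p'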
[06:18:35: the mgr repeated the two rbd_support "config rm" dispatches on both mons; attempts at 06:18:37, 06:18:42, 06:18:47, 06:18:52, 06:18:57, and 06:19:02 all failed identically; pgmap heartbeats v1903-v1916 omitted, cluster unchanged apart from write rates dipping to 85 B/s around v1913]
2026-03-10T06:19:03.107 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:03 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:19:03.107 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:03 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:19:03.107 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:03 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:19:03.141
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:03 vm03.local ceph-mon[50983]: pgmap v1916: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:19:03.141 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:03 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:19:03.141 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:03 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:19:03.141 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:03 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:19:04.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:04 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:04.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:04 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:04.548 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:04 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:04.548 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:04 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:05 vm00.local ceph-mon[49980]: pgmap v1917: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:19:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:05 vm03.local ceph-mon[50983]: pgmap v1917: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:19:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' 2026-03-10T06:19:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:19:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:07 vm00.local ceph-mon[49980]: pgmap v1918: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:07 vm03.local ceph-mon[50983]: pgmap v1918: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:07.590 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:07.591 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:07.619 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:07.620 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:09 vm00.local ceph-mon[49980]: pgmap v1919: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:09 vm03.local ceph-mon[50983]: pgmap v1919: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:11 vm00.local ceph-mon[49980]: pgmap v1920: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:11 vm03.local ceph-mon[50983]: pgmap v1920: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:12.621 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:12.622 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:12.649 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:12.649 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:13.343 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:13 vm00.local ceph-mon[49980]: pgmap v1921: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:13 vm03.local ceph-mon[50983]: pgmap v1921: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:15.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:15 vm00.local ceph-mon[49980]: pgmap v1922: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:15 vm03.local ceph-mon[50983]: pgmap v1922: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:17 vm00.local ceph-mon[49980]: pgmap v1923: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:17.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:17 vm03.local ceph-mon[50983]: pgmap v1923: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:17.651 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:17.651 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:17.683 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:17.683 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:19 vm00.local ceph-mon[49980]: pgmap v1924: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:19 vm03.local ceph-mon[50983]: pgmap v1924: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:21 vm00.local ceph-mon[49980]: pgmap v1925: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:21 vm03.local ceph-mon[50983]: pgmap v1925: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:22.685 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:22.685 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:22.713 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:22.714 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:23 vm00.local ceph-mon[49980]: pgmap v1926: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:23 vm03.local ceph-mon[50983]: pgmap v1926: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:25 vm03.local ceph-mon[50983]: pgmap v1927: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:25 vm00.local ceph-mon[49980]: pgmap v1927: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:27.716 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:27.716 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:27.743 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:27.743 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:27 vm00.local ceph-mon[49980]: pgmap v1928: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:27 vm03.local ceph-mon[50983]: pgmap v1928: 97 pgs: 97 active+clean; 453 KiB 
data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:29 vm00.local ceph-mon[49980]: pgmap v1929: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:29 vm03.local ceph-mon[50983]: pgmap v1929: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:31 vm00.local ceph-mon[49980]: pgmap v1930: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:31 vm03.local ceph-mon[50983]: pgmap v1930: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:32.745 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:32.746 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:32.773 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:32.773 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:33 vm00.local ceph-mon[49980]: pgmap v1931: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:33 vm03.local ceph-mon[50983]: pgmap v1931: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:35 vm00.local ceph-mon[49980]: pgmap v1932: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:19:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:35 vm03.local ceph-mon[50983]: pgmap v1932: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:19:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:19:37.775 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 
2026-03-10T06:19:37.775 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:37 vm00.local ceph-mon[49980]: pgmap v1933: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:37.801 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:37.801 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:37 vm03.local ceph-mon[50983]: pgmap v1933: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:39.608 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:39 vm03.local ceph-mon[50983]: pgmap v1934: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:39 vm00.local ceph-mon[49980]: pgmap v1934: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:41 vm00.local ceph-mon[49980]: pgmap v1935: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:41 vm03.local ceph-mon[50983]: pgmap v1935: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:42.802 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:42.803 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:42.830 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:42.830 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:43.627 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:43 vm00.local ceph-mon[49980]: pgmap v1936: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:43 vm03.local ceph-mon[50983]: pgmap v1936: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:45 vm00.local ceph-mon[49980]: pgmap v1937: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:45 vm03.local ceph-mon[50983]: pgmap v1937: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:47 vm00.local ceph-mon[49980]: pgmap v1938: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:47 vm03.local ceph-mon[50983]: pgmap v1938: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:47.831 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:47.832 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:47.859 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:47.860 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:49 vm00.local ceph-mon[49980]: pgmap v1939: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:49 vm03.local ceph-mon[50983]: pgmap v1939: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:51 vm00.local ceph-mon[49980]: pgmap v1940: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:51 vm03.local ceph-mon[50983]: pgmap v1940: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:52.861 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:52.862 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:52.887 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:52.887 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:53.773 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:53 vm00.local ceph-mon[49980]: pgmap v1941: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:53 vm03.local ceph-mon[50983]: pgmap v1941: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:55 vm00.local ceph-mon[49980]: pgmap v1942: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:55 vm03.local ceph-mon[50983]: pgmap v1942: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:19:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:57 vm00.local ceph-mon[49980]: pgmap v1943: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:57 vm03.local ceph-mon[50983]: pgmap v1943: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:19:57.888 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:19:57.889 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:19:57.915 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:19:57.916 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:19:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:19:59 vm00.local ceph-mon[49980]: pgmap v1944: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
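[editor's note] The teuthology.orchestra.run.vm00.stderr lines recurring throughout this stretch are bash xtrace ('set -x') output repeating every five seconds: 'hostname' is expanded, 'mount -t nfs vm00.local:/fake /mnt/foo -o sync' fails with 'mount.nfs: mount system call failed', and the shell sleeps before retrying. Reconstructed from that trace alone (the task definition itself is outside this excerpt, so the exact loop form is an assumption), the client-side retry is a shell loop of roughly this shape:

    # Hedged reconstruction from the repeating xtrace lines:
    #   ++ hostname
    #   + mount -t nfs vm00.local:/fake /mnt/foo -o sync
    #   mount.nfs: mount system call failed
    #   + sleep 5
    # Assumes /mnt/foo exists and an NFS export /fake is (eventually)
    # served on this host; the loop only exits once the mount succeeds.
    while ! mount -t nfs "$(hostname)":/fake /mnt/foo -o sync; do
        sleep 5
    done

Note the loop as traced has no timeout or retry cap: if the NFS export never becomes mountable, it retries indefinitely, which matches the unbounded repetition of the same four stderr lines at roughly 5-second intervals between the pgmap updates in this log.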
2026-03-10T06:19:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:19:59 vm03.local ceph-mon[50983]: pgmap v1944: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:00.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:00 vm00.local ceph-mon[49980]: overall HEALTH_OK 2026-03-10T06:20:00.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:00 vm03.local ceph-mon[50983]: overall HEALTH_OK 2026-03-10T06:20:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:01 vm00.local ceph-mon[49980]: pgmap v1945: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:01 vm03.local ceph-mon[50983]: pgmap v1945: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:02.917 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:02.918 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:02.944 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:02.944 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:03 vm00.local ceph-mon[49980]: pgmap v1946: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:03 vm03.local ceph-mon[50983]: pgmap v1946: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:05 vm00.local ceph-mon[49980]: pgmap v1947: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:20:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:20:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:20:05.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:20:05.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:20:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:05 vm03.local ceph-mon[50983]: pgmap v1947: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T06:20:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:20:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:20:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:20:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:20:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:07 vm00.local ceph-mon[49980]: pgmap v1948: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:07 vm03.local ceph-mon[50983]: pgmap v1948: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:07.945 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:07.946 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:07.972 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:07.973 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:09 vm00.local ceph-mon[49980]: pgmap v1949: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:09 vm03.local ceph-mon[50983]: pgmap v1949: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:11 vm00.local ceph-mon[49980]: pgmap v1950: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:11 vm03.local ceph-mon[50983]: pgmap v1950: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:12.974 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:12.975 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:13.000 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:13.000 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:12 vm00.local ceph-mon[49980]: pgmap v1951: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:12 vm03.local ceph-mon[50983]: pgmap v1951: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:14 vm00.local 
ceph-mon[49980]: pgmap v1952: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:14 vm03.local ceph-mon[50983]: pgmap v1952: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:16 vm00.local ceph-mon[49980]: pgmap v1953: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:16 vm03.local ceph-mon[50983]: pgmap v1953: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:18.002 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:18.002 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:18.028 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:18.028 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:18 vm00.local ceph-mon[49980]: pgmap v1954: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:18 vm03.local ceph-mon[50983]: pgmap v1954: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:20 vm00.local ceph-mon[49980]: pgmap v1955: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:20 vm03.local ceph-mon[50983]: pgmap v1955: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:23.030 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:23.030 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:23.056 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:23.057 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:22 vm00.local ceph-mon[49980]: pgmap v1956: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:22 vm03.local ceph-mon[50983]: pgmap v1956: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:24 vm00.local ceph-mon[49980]: pgmap v1957: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:24 vm03.local ceph-mon[50983]: pgmap v1957: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:26 vm00.local ceph-mon[49980]: pgmap v1958: 97 pgs: 97 
active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:26 vm03.local ceph-mon[50983]: pgmap v1958: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:28.058 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:28.059 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:28.086 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:28.087 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:29 vm00.local ceph-mon[49980]: pgmap v1959: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:29 vm03.local ceph-mon[50983]: pgmap v1959: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:31.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:31 vm00.local ceph-mon[49980]: pgmap v1960: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:31 vm03.local ceph-mon[50983]: pgmap v1960: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:33.088 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:33.089 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:33.115 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:33.115 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:33 vm00.local ceph-mon[49980]: pgmap v1961: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:33 vm03.local ceph-mon[50983]: pgmap v1961: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:35 vm00.local ceph-mon[49980]: pgmap v1962: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:35 vm03.local ceph-mon[50983]: pgmap v1962: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:20:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:20:36.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:20:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:20:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:37 vm00.local ceph-mon[49980]: pgmap v1963: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:37 vm03.local ceph-mon[50983]: pgmap v1963: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:38.117 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:38.117 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:38.143 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:38.143 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:39 vm00.local ceph-mon[49980]: pgmap v1964: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:39.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:39 vm03.local ceph-mon[50983]: pgmap v1964: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:41.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:41 vm00.local ceph-mon[49980]: pgmap v1965: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:41 vm03.local ceph-mon[50983]: pgmap v1965: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:43.144 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:43.167 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:43.240 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:43.240 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:43 vm00.local ceph-mon[49980]: pgmap v1966: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:43 vm03.local ceph-mon[50983]: pgmap v1966: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:45 vm00.local ceph-mon[49980]: pgmap v1967: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:45 vm03.local ceph-mon[50983]: pgmap v1967: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB 
used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:47 vm00.local ceph-mon[49980]: pgmap v1968: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:47 vm03.local ceph-mon[50983]: pgmap v1968: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:48.242 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:48.243 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:48.499 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:48.500 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:48.934 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:48 vm00.local ceph-mon[49980]: pgmap v1969: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:48 vm03.local ceph-mon[50983]: pgmap v1969: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:51.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:50 vm00.local ceph-mon[49980]: pgmap v1970: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:51.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:50 vm03.local ceph-mon[50983]: pgmap v1970: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:53.502 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:53.503 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:53.529 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:53.530 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:53 vm00.local ceph-mon[49980]: pgmap v1971: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:53 vm03.local ceph-mon[50983]: pgmap v1971: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:56.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:55 vm00.local ceph-mon[49980]: pgmap v1972: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:56.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:55 vm03.local ceph-mon[50983]: pgmap v1972: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:57.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:56 vm00.local ceph-mon[49980]: pgmap v1973: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:57.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:56 vm03.local ceph-mon[50983]: pgmap v1973: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s 
rd, 170 B/s wr, 0 op/s 2026-03-10T06:20:58.531 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:20:58.532 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:20:58.557 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:20:58.558 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:20:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:20:58 vm00.local ceph-mon[49980]: pgmap v1974: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:20:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:20:58 vm03.local ceph-mon[50983]: pgmap v1974: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:00 vm00.local ceph-mon[49980]: pgmap v1975: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:01 vm03.local ceph-mon[50983]: pgmap v1975: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:03 vm00.local ceph-mon[49980]: pgmap v1976: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:03 vm03.local ceph-mon[50983]: pgmap v1976: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:03.559 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:03.559 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:03.589 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:03.590 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:05 vm00.local ceph-mon[49980]: pgmap v1977: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:21:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:21:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:21:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:05 vm03.local ceph-mon[50983]: pgmap v1977: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-10T06:21:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:21:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:21:06.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:21:06.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:21:06.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:21:06.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:21:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:07 vm00.local ceph-mon[49980]: pgmap v1978: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:07 vm03.local ceph-mon[50983]: pgmap v1978: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:08.591 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:08.592 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:08.623 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:08.623 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:09 vm00.local ceph-mon[49980]: pgmap v1979: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:09 vm03.local ceph-mon[50983]: pgmap v1979: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:11 vm00.local ceph-mon[49980]: pgmap v1980: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:11 vm03.local ceph-mon[50983]: pgmap v1980: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:13 vm00.local ceph-mon[49980]: pgmap v1981: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:13 vm03.local ceph-mon[50983]: pgmap v1981: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:13.624 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 
2026-03-10T06:21:13.625 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:13.650 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:13.651 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:15 vm00.local ceph-mon[49980]: pgmap v1982: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:15 vm03.local ceph-mon[50983]: pgmap v1982: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:17.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:17 vm00.local ceph-mon[49980]: pgmap v1983: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:17 vm03.local ceph-mon[50983]: pgmap v1983: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:18.652 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:18.653 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:18.707 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:18.708 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:19 vm00.local ceph-mon[49980]: pgmap v1984: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:19.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:19 vm03.local ceph-mon[50983]: pgmap v1984: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:21 vm00.local ceph-mon[49980]: pgmap v1985: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:21 vm03.local ceph-mon[50983]: pgmap v1985: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:23 vm00.local ceph-mon[49980]: pgmap v1986: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:23 vm03.local ceph-mon[50983]: pgmap v1986: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:23.710 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:23.710 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:23.738 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:23.739 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:25 vm00.local ceph-mon[49980]: pgmap v1987: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s 
rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:25 vm03.local ceph-mon[50983]: pgmap v1987: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:27 vm00.local ceph-mon[49980]: pgmap v1988: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:27 vm03.local ceph-mon[50983]: pgmap v1988: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:28.740 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:28.740 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:28.934 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:28.935 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:29 vm00.local ceph-mon[49980]: pgmap v1989: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:29 vm03.local ceph-mon[50983]: pgmap v1989: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:31 vm00.local ceph-mon[49980]: pgmap v1990: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:31 vm03.local ceph-mon[50983]: pgmap v1990: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:33 vm00.local ceph-mon[49980]: pgmap v1991: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:33 vm03.local ceph-mon[50983]: pgmap v1991: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:33.936 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:33.937 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:33.965 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:33.965 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:35 vm00.local ceph-mon[49980]: pgmap v1992: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:21:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:21:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:35 vm03.local ceph-mon[50983]: pgmap v1992: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:21:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:21:38.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:37 vm00.local ceph-mon[49980]: pgmap v1993: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:38.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:37 vm03.local ceph-mon[50983]: pgmap v1993: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:38.967 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:38.967 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:38 vm00.local ceph-mon[49980]: pgmap v1994: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:39.282 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:39.291 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:38 vm03.local ceph-mon[50983]: pgmap v1994: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:41 vm00.local ceph-mon[49980]: pgmap v1995: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:41 vm03.local ceph-mon[50983]: pgmap v1995: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:43 vm00.local ceph-mon[49980]: pgmap v1996: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:43 vm03.local ceph-mon[50983]: pgmap v1996: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:44.284 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:44.284 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:44.310 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:44.310 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:45.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:45 vm03.local ceph-mon[50983]: pgmap v1997: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:45 vm00.local ceph-mon[49980]: pgmap v1997: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:47 vm00.local ceph-mon[49980]: pgmap v1998: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:47 vm03.local ceph-mon[50983]: pgmap v1998: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:49.311 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:49.312 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:49.338 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:49.338 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:49 vm00.local ceph-mon[49980]: pgmap v1999: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:49 vm03.local ceph-mon[50983]: pgmap v1999: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:51 vm00.local ceph-mon[49980]: pgmap v2000: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:51 vm03.local ceph-mon[50983]: pgmap v2000: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:53 vm00.local ceph-mon[49980]: pgmap v2001: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:53 vm03.local ceph-mon[50983]: pgmap v2001: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:54.339 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:54.340 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:54.367 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:54.368 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:55 vm00.local ceph-mon[49980]: pgmap v2002: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:55 vm03.local ceph-mon[50983]: pgmap v2002: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:57.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:57 vm03.local ceph-mon[50983]: pgmap v2003: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:57 vm00.local ceph-mon[49980]: pgmap v2003: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:21:59.369 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:21:59.370 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:21:59.396 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:21:59.397 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:21:59.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:21:59 vm03.local ceph-mon[50983]: pgmap v2004: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:21:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:21:59 vm00.local ceph-mon[49980]: pgmap v2004: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:01 vm03.local ceph-mon[50983]: pgmap v2005: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:01 vm00.local ceph-mon[49980]: pgmap v2005: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:03 vm00.local ceph-mon[49980]: pgmap v2006: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:03 vm03.local ceph-mon[50983]: pgmap v2006: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:04.398 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:04.399 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:04.427 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:04.427 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:05 vm00.local ceph-mon[49980]: pgmap v2007: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:22:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:22:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:05 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:22:05.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:05 vm03.local ceph-mon[50983]: pgmap v2007: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:22:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:22:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:05 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:22:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:22:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:22:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:22:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:22:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:07 vm00.local ceph-mon[49980]: pgmap v2008: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:07 vm03.local ceph-mon[50983]: pgmap v2008: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:09.429 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:09.429 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:09.456 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:09.456 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:09 vm00.local ceph-mon[49980]: pgmap v2009: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:09 vm03.local ceph-mon[50983]: pgmap v2009: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:10 vm00.local ceph-mon[49980]: pgmap v2010: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:10 vm03.local ceph-mon[50983]: pgmap v2010: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:13 vm00.local ceph-mon[49980]: pgmap v2011: 97 pgs: 97 
active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:13 vm03.local ceph-mon[50983]: pgmap v2011: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:14.458 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:14.458 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:14.519 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:14.520 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:15 vm00.local ceph-mon[49980]: pgmap v2012: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:15 vm03.local ceph-mon[50983]: pgmap v2012: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:17 vm00.local ceph-mon[49980]: pgmap v2013: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:17 vm03.local ceph-mon[50983]: pgmap v2013: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:19.522 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:19.522 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:19.595 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:19.595 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:19 vm00.local ceph-mon[49980]: pgmap v2014: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:19 vm03.local ceph-mon[50983]: pgmap v2014: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:21.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:21 vm00.local ceph-mon[49980]: pgmap v2015: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:21 vm03.local ceph-mon[50983]: pgmap v2015: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:23.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:23 vm00.local ceph-mon[49980]: pgmap v2016: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:23 vm03.local ceph-mon[50983]: pgmap v2016: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:24.597 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:24.597 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:24.633 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:24.633 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:25 vm00.local ceph-mon[49980]: pgmap v2017: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:25 vm03.local ceph-mon[50983]: pgmap v2017: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:27 vm00.local ceph-mon[49980]: pgmap v2018: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:27 vm03.local ceph-mon[50983]: pgmap v2018: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:29.635 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:29.635 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:29.668 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:29.669 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:29 vm00.local ceph-mon[49980]: pgmap v2019: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:29 vm03.local ceph-mon[50983]: pgmap v2019: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:31 vm00.local ceph-mon[49980]: pgmap v2020: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:31 vm03.local ceph-mon[50983]: pgmap v2020: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:33 vm00.local ceph-mon[49980]: pgmap v2021: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:33 vm03.local ceph-mon[50983]: pgmap v2021: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:34.671 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:34.671 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:34.708 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:34.709 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:35 vm00.local ceph-mon[49980]: pgmap v2022: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:35.780 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:22:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:22:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:35 vm03.local ceph-mon[50983]: pgmap v2022: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:22:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:22:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:37 vm00.local ceph-mon[49980]: pgmap v2023: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:37 vm03.local ceph-mon[50983]: pgmap v2023: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:39.710 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:39.711 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:39.751 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:39.752 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:39 vm00.local ceph-mon[49980]: pgmap v2024: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:39 vm03.local ceph-mon[50983]: pgmap v2024: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:41 vm00.local ceph-mon[49980]: pgmap v2025: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:41 vm03.local ceph-mon[50983]: pgmap v2025: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:43 vm00.local ceph-mon[49980]: pgmap v2026: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:43 vm03.local ceph-mon[50983]: pgmap v2026: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:44.754 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:44.754 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:44.780 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:44.780 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:44 vm00.local ceph-mon[49980]: pgmap v2027: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:44 vm03.local ceph-mon[50983]: pgmap v2027: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:47.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:46 vm00.local ceph-mon[49980]: pgmap v2028: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:47.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:46 vm03.local ceph-mon[50983]: pgmap v2028: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:49 vm00.local ceph-mon[49980]: pgmap v2029: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:49.782 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:49.782 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:49 vm03.local ceph-mon[50983]: pgmap v2029: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:49.823 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:49.823 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:51 vm00.local ceph-mon[49980]: pgmap v2030: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:51 vm03.local ceph-mon[50983]: pgmap v2030: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:53.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:53 vm00.local ceph-mon[49980]: pgmap v2031: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:53 vm03.local ceph-mon[50983]: pgmap v2031: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:54.824 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:54.825 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:54.851 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:54.852 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:22:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:55 
vm00.local ceph-mon[49980]: pgmap v2032: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:55 vm03.local ceph-mon[50983]: pgmap v2032: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:57 vm00.local ceph-mon[49980]: pgmap v2033: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:57 vm03.local ceph-mon[50983]: pgmap v2033: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:22:58.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:22:58 vm00.local ceph-mon[49980]: pgmap v2034: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:58.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:22:58 vm03.local ceph-mon[50983]: pgmap v2034: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:22:59.853 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:22:59.854 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:22:59.883 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:22:59.884 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:00 vm00.local ceph-mon[49980]: pgmap v2035: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:00 vm03.local ceph-mon[50983]: pgmap v2035: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:03 vm00.local ceph-mon[49980]: pgmap v2036: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:03 vm03.local ceph-mon[50983]: pgmap v2036: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:04.886 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:04.887 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:04.949 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:04.949 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:05.669 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:05 vm03.local ceph-mon[50983]: pgmap v2037: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:05.671 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:05 vm00.local ceph-mon[49980]: pgmap v2037: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:06 vm00.local ceph-mon[49980]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:23:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:23:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:23:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:23:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:23:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:23:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:23:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:23:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:23:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:23:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:07 vm00.local ceph-mon[49980]: pgmap v2038: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:07 vm03.local ceph-mon[50983]: pgmap v2038: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:09 vm00.local ceph-mon[49980]: pgmap v2039: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:09 vm03.local ceph-mon[50983]: pgmap v2039: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:09.951 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:09.951 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:09.976 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:09.977 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:11 vm00.local ceph-mon[49980]: pgmap v2040: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:11 vm03.local ceph-mon[50983]: pgmap v2040: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:13 vm00.local ceph-mon[49980]: pgmap v2041: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:13 vm03.local ceph-mon[50983]: pgmap v2041: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:14.979 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:14.979 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:14 vm00.local ceph-mon[49980]: pgmap v2042: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:15.033 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:15.034 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:14 vm03.local ceph-mon[50983]: pgmap v2042: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:17.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:16 vm00.local ceph-mon[49980]: pgmap v2043: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:17.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:16 vm03.local ceph-mon[50983]: pgmap v2043: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:19 vm00.local ceph-mon[49980]: pgmap v2044: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:19 vm03.local ceph-mon[50983]: pgmap v2044: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:20.035 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:20.036 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:20.067 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:20.068 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:21 vm00.local ceph-mon[49980]: pgmap v2045: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:21 vm03.local ceph-mon[50983]: pgmap v2045: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:23 vm00.local ceph-mon[49980]: pgmap v2046: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 
op/s 2026-03-10T06:23:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:23 vm03.local ceph-mon[50983]: pgmap v2046: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:25.069 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:25.070 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:25.098 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:25.099 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:25 vm00.local ceph-mon[49980]: pgmap v2047: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:25 vm03.local ceph-mon[50983]: pgmap v2047: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:27 vm00.local ceph-mon[49980]: pgmap v2048: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:27 vm03.local ceph-mon[50983]: pgmap v2048: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:29 vm00.local ceph-mon[49980]: pgmap v2049: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:29 vm03.local ceph-mon[50983]: pgmap v2049: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:30.100 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:30.100 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:30.127 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:30.128 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:31 vm00.local ceph-mon[49980]: pgmap v2050: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:31 vm03.local ceph-mon[50983]: pgmap v2050: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:33 vm00.local ceph-mon[49980]: pgmap v2051: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:33 vm03.local ceph-mon[50983]: pgmap v2051: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:35.129 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:35.129 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:35.155 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:35.156 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:35 vm00.local ceph-mon[49980]: pgmap v2052: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:23:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:23:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:35 vm03.local ceph-mon[50983]: pgmap v2052: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:23:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:23:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:37 vm00.local ceph-mon[49980]: pgmap v2053: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:37 vm03.local ceph-mon[50983]: pgmap v2053: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:39 vm00.local ceph-mon[49980]: pgmap v2054: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:39 vm03.local ceph-mon[50983]: pgmap v2054: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:40.158 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:40.158 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:40.288 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:40.289 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:40 vm00.local ceph-mon[49980]: pgmap v2055: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:41.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:40 vm03.local ceph-mon[50983]: pgmap v2055: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
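Alongside the two-second pgmap heartbeats, the mgr issues two recurring housekeeping batches, each about once a minute and each logged by both mons: a pair of 'config rm' calls for the rbd_support mirror_snapshot_schedule and trash_purge_schedule keys (at :35 past the minute here) and, offset by roughly thirty seconds, 'config dump', 'config generate-minimal-conf', and 'auth get' for client.admin. One quick way to confirm that cadence when reviewing a saved copy of this log (the filename teuthology.log is assumed, not part of this run):

    # Assumed local filename; tallies the mon commands the mgr dispatches,
    # surfacing the once-a-minute rbd_support/minimal-conf housekeeping.
    grep -oE 'cmd=\[\{"prefix": ?"[^"]+"' teuthology.log | sort | uniq -c | sort -rn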
2026-03-10T06:23:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:43 vm00.local ceph-mon[49980]: pgmap v2056: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:43 vm03.local ceph-mon[50983]: pgmap v2056: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:45.291 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:45.291 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:45.318 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:45.318 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:45 vm00.local ceph-mon[49980]: pgmap v2057: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:45 vm03.local ceph-mon[50983]: pgmap v2057: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:47 vm00.local ceph-mon[49980]: pgmap v2058: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:47 vm03.local ceph-mon[50983]: pgmap v2058: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:49 vm00.local ceph-mon[49980]: pgmap v2059: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:49 vm03.local ceph-mon[50983]: pgmap v2059: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:50.320 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:50.320 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:50.349 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:50.349 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:51 vm00.local ceph-mon[49980]: pgmap v2060: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:51 vm03.local ceph-mon[50983]: pgmap v2060: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:53 vm00.local ceph-mon[49980]: pgmap v2061: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:53 vm03.local ceph-mon[50983]: pgmap v2061: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:55.351 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:23:55.351 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:23:55.379 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:23:55.379 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:23:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:55 vm00.local ceph-mon[49980]: pgmap v2062: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:55 vm03.local ceph-mon[50983]: pgmap v2062: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:57 vm00.local ceph-mon[49980]: pgmap v2063: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:57 vm03.local ceph-mon[50983]: pgmap v2063: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:23:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:23:59 vm00.local ceph-mon[49980]: pgmap v2064: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:23:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:23:59 vm03.local ceph-mon[50983]: pgmap v2064: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:24:00.381 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:24:00.382 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:24:00.409 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:24:00.410 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:24:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:01 vm00.local ceph-mon[49980]: pgmap v2065: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:24:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:01 vm03.local ceph-mon[50983]: pgmap v2065: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:24:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:03 vm00.local ceph-mon[49980]: pgmap v2066: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:24:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:03 vm03.local ceph-mon[50983]: pgmap v2066: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:24:05.411 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:24:05.412 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:24:05.438 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:24:05.439 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:24:05.718 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:05 vm03.local ceph-mon[50983]: pgmap v2067: 97 pgs: 97 active+clean; 453 KiB 
data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:05.721 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:05 vm00.local ceph-mon[49980]: pgmap v2067: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:24:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:24:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:24:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:24:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:24:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:24:06.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:24:06.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:24:06.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:24:06.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:24:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:07 vm00.local ceph-mon[49980]: pgmap v2068: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:07 vm03.local ceph-mon[50983]: pgmap v2068: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:09.609 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:09 vm03.local ceph-mon[50983]: pgmap v2069: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:09 vm00.local ceph-mon[49980]: pgmap v2069: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:10.440 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:10.441 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:10.470 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:10.470 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:11 vm00.local ceph-mon[49980]: pgmap v2070: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:11 vm03.local ceph-mon[50983]: pgmap v2070: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:13 vm00.local ceph-mon[49980]: pgmap v2071: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:13 vm03.local ceph-mon[50983]: pgmap v2071: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:15.472 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:15.472 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:15.498 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:15.499 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:15 vm00.local ceph-mon[49980]: pgmap v2072: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:15 vm03.local ceph-mon[50983]: pgmap v2072: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:17 vm00.local ceph-mon[49980]: pgmap v2073: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:17 vm03.local ceph-mon[50983]: pgmap v2073: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:19 vm00.local ceph-mon[49980]: pgmap v2074: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:19 vm03.local ceph-mon[50983]: pgmap v2074: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:20.500 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:20.501 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:20.530 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:20.530 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:21 vm00.local ceph-mon[49980]: pgmap v2075: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:21 vm03.local ceph-mon[50983]: pgmap v2075: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:23 vm00.local ceph-mon[49980]: pgmap v2076: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:23 vm03.local ceph-mon[50983]: pgmap v2076: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:25.532 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:25.532 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:25.558 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:25.559 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:25 vm00.local ceph-mon[49980]: pgmap v2077: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:25 vm03.local ceph-mon[50983]: pgmap v2077: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:27 vm00.local ceph-mon[49980]: pgmap v2078: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:27 vm03.local ceph-mon[50983]: pgmap v2078: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:28.953 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:28 vm00.local ceph-mon[49980]: pgmap v2079: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:28 vm03.local ceph-mon[50983]: pgmap v2079: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:30.560 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:30.561 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:30.588 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:30.589 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:30 vm00.local ceph-mon[49980]: pgmap v2080: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:30 vm03.local ceph-mon[50983]: pgmap v2080: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:33 vm00.local ceph-mon[49980]: pgmap v2081: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:33 vm03.local ceph-mon[50983]: pgmap v2081: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:35.590 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:35.591 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:35.618 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:35.618 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:35 vm00.local ceph-mon[49980]: pgmap v2082: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:24:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:24:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:35 vm03.local ceph-mon[50983]: pgmap v2082: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:24:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:24:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:37 vm00.local ceph-mon[49980]: pgmap v2083: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:37 vm03.local ceph-mon[50983]: pgmap v2083: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:39 vm00.local ceph-mon[49980]: pgmap v2084: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:39 vm03.local ceph-mon[50983]: pgmap v2084: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:40.620 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:40.620 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:40.648 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:40.648 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:41 vm00.local ceph-mon[49980]: pgmap v2085: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:41 vm03.local ceph-mon[50983]: pgmap v2085: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:43 vm00.local ceph-mon[49980]: pgmap v2086: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:43 vm03.local ceph-mon[50983]: pgmap v2086: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:45.650 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:45.650 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:45.681 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:45.682 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:45 vm00.local ceph-mon[49980]: pgmap v2087: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:45 vm03.local ceph-mon[50983]: pgmap v2087: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:47 vm00.local ceph-mon[49980]: pgmap v2088: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:47 vm03.local ceph-mon[50983]: pgmap v2088: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:49 vm00.local ceph-mon[49980]: pgmap v2089: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:49 vm03.local ceph-mon[50983]: pgmap v2089: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:50.683 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:50.684 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:50.711 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:50.712 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:51 vm00.local ceph-mon[49980]: pgmap v2090: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:51 vm03.local ceph-mon[50983]: pgmap v2090: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:53 vm00.local ceph-mon[49980]: pgmap v2091: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:53 vm03.local ceph-mon[50983]: pgmap v2091: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:55.713 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:24:55.714 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:24:55.742 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:24:55.742 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:24:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:55 vm00.local ceph-mon[49980]: pgmap v2092: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:55 vm03.local ceph-mon[50983]: pgmap v2092: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:57 vm00.local ceph-mon[49980]: pgmap v2093: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:57 vm03.local ceph-mon[50983]: pgmap v2093: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:24:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:24:59 vm00.local ceph-mon[49980]: pgmap v2094: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:24:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:24:59 vm03.local ceph-mon[50983]: pgmap v2094: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:00.744 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:00.744 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:00.771 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:00.772 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:01 vm00.local ceph-mon[49980]: pgmap v2095: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:01 vm03.local ceph-mon[50983]: pgmap v2095: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:03 vm00.local ceph-mon[49980]: pgmap v2096: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:03 vm03.local ceph-mon[50983]: pgmap v2096: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:05.773 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:05.774 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:05.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:05 vm00.local ceph-mon[49980]: pgmap v2097: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:05.802 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:05.802 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:05 vm03.local ceph-mon[50983]: pgmap v2097: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:25:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:25:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:25:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:25:07.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:25:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:25:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:25:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:25:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:25:07.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:25:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:07 vm00.local ceph-mon[49980]: pgmap v2098: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:07 vm03.local ceph-mon[50983]: pgmap v2098: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:08.956 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:08 vm00.local ceph-mon[49980]: pgmap v2099: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:08 vm03.local ceph-mon[50983]: pgmap v2099: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:10.804 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:10.804 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:10.830 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:10.831 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:10 vm00.local ceph-mon[49980]: pgmap v2100: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:10 vm03.local ceph-mon[50983]: pgmap v2100: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:13 vm00.local ceph-mon[49980]: pgmap v2101: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:13 vm03.local ceph-mon[50983]: pgmap v2101: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:15 vm00.local ceph-mon[49980]: pgmap v2102: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:15 vm03.local ceph-mon[50983]: pgmap v2102: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:15.832 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:15.833 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:15.859 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:15.859 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:17 vm00.local ceph-mon[49980]: pgmap v2103: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:17 vm03.local ceph-mon[50983]: pgmap v2103: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:19 vm00.local ceph-mon[49980]: pgmap v2104: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:19 vm03.local ceph-mon[50983]: pgmap v2104: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:20.861 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:20.861 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:20.888 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:20.889 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:21 vm00.local ceph-mon[49980]: pgmap v2105: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:21 vm03.local ceph-mon[50983]: pgmap v2105: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:23 vm00.local ceph-mon[49980]: pgmap v2106: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:23 vm03.local ceph-mon[50983]: pgmap v2106: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:25 vm00.local ceph-mon[49980]: pgmap v2107: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:25 vm03.local ceph-mon[50983]: pgmap v2107: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:25.890 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:25.890 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:25.917 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:25.917 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:27 vm00.local ceph-mon[49980]: pgmap v2108: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:27 vm03.local ceph-mon[50983]: pgmap v2108: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:29 vm00.local ceph-mon[49980]: pgmap v2109: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:29 vm03.local ceph-mon[50983]: pgmap v2109: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:30.918 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:30.919 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:30.945 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:30.946 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:31 vm00.local ceph-mon[49980]: pgmap v2110: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:31 vm03.local ceph-mon[50983]: pgmap v2110: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:33 vm00.local ceph-mon[49980]: pgmap v2111: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:33 vm03.local ceph-mon[50983]: pgmap v2111: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:35 vm00.local ceph-mon[49980]: pgmap v2112: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:25:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:35 vm03.local ceph-mon[50983]: pgmap v2112: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:25:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:25:35.947 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:35.948 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:35.976 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:35.976 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:37 vm00.local ceph-mon[49980]: pgmap v2113: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:37 vm03.local ceph-mon[50983]: pgmap v2113: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:39 vm00.local ceph-mon[49980]: pgmap v2114: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:39 vm03.local ceph-mon[50983]: pgmap v2114: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:40.978 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:40.978 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:41.003 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:41.004 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:41 vm00.local ceph-mon[49980]: pgmap v2115: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:41 vm03.local ceph-mon[50983]: pgmap v2115: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:43 vm00.local ceph-mon[49980]: pgmap v2116: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:43 vm03.local ceph-mon[50983]: pgmap v2116: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:45 vm00.local ceph-mon[49980]: pgmap v2117: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:45 vm03.local ceph-mon[50983]: pgmap v2117: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:46.005 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:46.005 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:46.031 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:46.032 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:47 vm00.local ceph-mon[49980]: pgmap v2118: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:47 vm03.local ceph-mon[50983]: pgmap v2118: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:49 vm00.local ceph-mon[49980]: pgmap v2119: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:49 vm03.local ceph-mon[50983]: pgmap v2119: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:51.034 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:51.034 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:51.062 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:51.062 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:51 vm00.local ceph-mon[49980]: pgmap v2120: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:51 vm03.local ceph-mon[50983]: pgmap v2120: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:53 vm00.local ceph-mon[49980]: pgmap v2121: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:53 vm03.local ceph-mon[50983]: pgmap v2121: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:55 vm00.local ceph-mon[49980]: pgmap v2122: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:55 vm03.local ceph-mon[50983]: pgmap v2122: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:56.063 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:25:56.064 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:25:56.092 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:25:56.093 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:25:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:57 vm00.local ceph-mon[49980]: pgmap v2123: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:57 vm03.local ceph-mon[50983]: pgmap v2123: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:25:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:25:59 vm00.local ceph-mon[49980]: pgmap v2124: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:25:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:25:59 vm03.local ceph-mon[50983]: pgmap v2124: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:01.094 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:01.094 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:01.121 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:01.121 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:01 vm00.local ceph-mon[49980]: pgmap v2125: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:01 vm03.local ceph-mon[50983]: pgmap v2125: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:03 vm00.local ceph-mon[49980]: pgmap v2126: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:03 vm03.local ceph-mon[50983]: pgmap v2126: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:05 vm00.local ceph-mon[49980]: pgmap v2127: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:05 vm03.local ceph-mon[50983]: pgmap v2127: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:06.123 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:06.123 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:06.150 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:06.150 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:06.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:26:06.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:26:06.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:06 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:26:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:26:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:26:06.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:06 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:26:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:07 vm00.local ceph-mon[49980]: pgmap v2128: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:26:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:26:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:07 vm03.local ceph-mon[50983]: pgmap v2128: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:26:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:26:08.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:08 vm00.local ceph-mon[49980]: pgmap v2129: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:08 vm03.local ceph-mon[50983]: pgmap v2129: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:11.152 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:11.152 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:11.180 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:11.181 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:10 vm00.local ceph-mon[49980]: pgmap v2130: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:10 vm03.local ceph-mon[50983]: pgmap v2130: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:13 vm00.local ceph-mon[49980]: pgmap v2131: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:13 vm03.local ceph-mon[50983]: pgmap v2131: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:15 vm00.local ceph-mon[49980]: pgmap v2132: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:15 vm03.local ceph-mon[50983]: pgmap v2132: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:16.182 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:16.183 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:16.264 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:16.265 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:17 vm00.local ceph-mon[49980]: pgmap v2133: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:17 vm03.local ceph-mon[50983]: pgmap v2133: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:19 vm00.local ceph-mon[49980]: pgmap v2134: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:19 vm03.local ceph-mon[50983]: pgmap v2134: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:21.267 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:21.267 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:21.294 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:21.294 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:21 vm00.local ceph-mon[49980]: pgmap v2135: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:21 vm03.local ceph-mon[50983]: pgmap v2135: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:23 vm00.local ceph-mon[49980]: pgmap v2136: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:23 vm03.local ceph-mon[50983]: pgmap v2136: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:25 vm00.local ceph-mon[49980]: pgmap v2137: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:25 vm03.local ceph-mon[50983]: pgmap v2137: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:26.296 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:26.296 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:26.322 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:26.323 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:27 vm00.local ceph-mon[49980]: pgmap v2138: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:27 vm03.local ceph-mon[50983]: pgmap v2138: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:29 vm00.local ceph-mon[49980]: pgmap v2139: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:29 vm03.local ceph-mon[50983]: pgmap v2139: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:31.324 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:31.324 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:31.353 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:31.353 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:31 vm00.local ceph-mon[49980]: pgmap v2140: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:31 vm03.local ceph-mon[50983]: pgmap v2140: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:33 vm00.local ceph-mon[49980]: pgmap v2141: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:33 vm03.local ceph-mon[50983]: pgmap v2141: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:35 vm00.local ceph-mon[49980]: pgmap v2142: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:26:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:26:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:35 vm03.local ceph-mon[50983]: pgmap v2142: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:26:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:26:36.354 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:36.355 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:36.381 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:36.381 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:36 vm00.local ceph-mon[49980]: pgmap v2143: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:36 vm03.local ceph-mon[50983]: pgmap v2143: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:39.758 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:39 vm03.local ceph-mon[50983]: pgmap v2144: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:39 vm00.local ceph-mon[49980]: pgmap v2144: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:41.383 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:41.383 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:41.409 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:41.410 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:41 vm00.local ceph-mon[49980]: pgmap v2145: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:41 vm03.local ceph-mon[50983]: pgmap v2145: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:43 vm00.local ceph-mon[49980]: pgmap v2146: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:43 vm03.local ceph-mon[50983]: pgmap v2146: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:45 vm00.local ceph-mon[49980]: pgmap v2147: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:45 vm03.local ceph-mon[50983]: pgmap v2147: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:46.411 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:46.411 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:46.439 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:46.439 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:47 vm00.local ceph-mon[49980]: pgmap v2148: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:47 vm03.local ceph-mon[50983]: pgmap v2148: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:49 vm00.local ceph-mon[49980]: pgmap v2149: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:49 vm03.local ceph-mon[50983]: pgmap v2149: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:51.441 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:51.441 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:51.552 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:51.552 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:51 vm03.local ceph-mon[50983]: pgmap v2150: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:51 vm00.local ceph-mon[49980]: pgmap v2150: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:53 vm00.local ceph-mon[49980]: pgmap v2151: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:53 vm03.local ceph-mon[50983]: pgmap v2151: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:55 vm00.local ceph-mon[49980]: pgmap v2152: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:55 vm03.local ceph-mon[50983]: pgmap v2152: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:56.554 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:26:56.555 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:26:56.582 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:26:56.583 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:26:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:57 vm00.local ceph-mon[49980]: pgmap v2153: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:57 vm03.local ceph-mon[50983]: pgmap v2153: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:26:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:26:59 vm00.local ceph-mon[49980]: pgmap v2154: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:26:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:26:59 vm03.local ceph-mon[50983]: pgmap v2154: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:01.584 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:01.585 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:01.611 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:01.612 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:01 vm03.local ceph-mon[50983]: pgmap v2155: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:03 vm00.local ceph-mon[49980]: pgmap v2156: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:03 vm03.local ceph-mon[50983]: pgmap v2156: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:05.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:05 vm00.local ceph-mon[49980]: pgmap v2157: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:05 vm03.local ceph-mon[50983]: pgmap v2157: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:06.615 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:06.615 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:06.711 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:06.712 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: pgmap v2158: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:27:07.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: pgmap v2158: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:27:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:07 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:27:09.609 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:09 vm03.local ceph-mon[50983]: pgmap v2159: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:09 vm00.local ceph-mon[49980]: pgmap v2159: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:11.713 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:11.714 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:11.741 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:11.741 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:11 vm00.local ceph-mon[49980]: pgmap v2160: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:11 vm03.local ceph-mon[50983]: pgmap v2160: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:13 vm00.local ceph-mon[49980]: pgmap v2161: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:13 vm03.local ceph-mon[50983]: pgmap v2161: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:15 vm00.local ceph-mon[49980]: pgmap v2162: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:15 vm03.local ceph-mon[50983]: pgmap v2162: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:16.743 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:16.743 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:16.770 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:16.770 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:17 vm03.local ceph-mon[50983]: pgmap v2163: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:17 vm00.local ceph-mon[49980]: pgmap v2163: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:18.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:18 vm03.local ceph-mon[50983]: pgmap v2164: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:18.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:18 vm00.local ceph-mon[49980]: pgmap v2164: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:21.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:20 vm00.local ceph-mon[49980]: pgmap v2165: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:20 vm03.local ceph-mon[50983]: pgmap v2165: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:21.771 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:21.773 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:21.843 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:21.843 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:23 vm00.local ceph-mon[49980]: pgmap v2166: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:23 vm03.local ceph-mon[50983]: pgmap v2166: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:25 vm00.local ceph-mon[49980]: pgmap v2167: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:25 vm03.local ceph-mon[50983]: pgmap v2167: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:26.845 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:26.845 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:26.870 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:26.871 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:27 vm00.local ceph-mon[49980]: pgmap v2168: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:27 vm03.local ceph-mon[50983]: pgmap v2168: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
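
The interleaved `pgmap vNNNN` journal records are the mons' periodic PG digest; through this whole stretch the cluster side stays healthy (97 PGs active+clean, trickle IO) while only the client-side mount fails. The same digest can be polled on demand; a sketch, assuming an admin keyring is available on the host:

    ceph pg stat   # one-line digest, e.g. "97 pgs: 97 active+clean; 453 KiB data, ..."
    ceph -s        # full status: health, mon/mgr/osd state, client io rates
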
2026-03-10T06:27:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:29 vm00.local ceph-mon[49980]: pgmap v2169: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:29 vm03.local ceph-mon[50983]: pgmap v2169: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:31 vm00.local ceph-mon[49980]: pgmap v2170: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:31 vm03.local ceph-mon[50983]: pgmap v2170: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:31.872 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:31.873 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:31.900 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:31.901 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:33.846 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:33 vm00.local ceph-mon[49980]: pgmap v2171: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:33 vm03.local ceph-mon[50983]: pgmap v2171: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:34 vm00.local ceph-mon[49980]: pgmap v2172: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:34 vm03.local ceph-mon[50983]: pgmap v2172: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:27:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:27:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:27:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:27:36.902 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
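
The `config rm` dispatches above are apparently the mgr's rbd_support module clearing its per-instance schedule keys; the mon commands it issues correspond to the CLI form below (shown only to decode the dispatch records):

    ceph config rm mgr mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule
    ceph config rm mgr mgr/rbd_support/vm00.vnepyw/trash_purge_schedule
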
2026-03-10T06:27:36.903 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:36.999 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:37.000 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:36 vm00.local ceph-mon[49980]: pgmap v2173: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:36 vm03.local ceph-mon[50983]: pgmap v2173: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:39.759 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:39 vm03.local ceph-mon[50983]: pgmap v2174: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:39 vm00.local ceph-mon[49980]: pgmap v2174: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:41 vm00.local ceph-mon[49980]: pgmap v2175: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:41 vm03.local ceph-mon[50983]: pgmap v2175: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:42.001 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:42.002 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:42.027 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:42.028 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:43 vm00.local ceph-mon[49980]: pgmap v2176: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:43 vm03.local ceph-mon[50983]: pgmap v2176: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:45 vm00.local ceph-mon[49980]: pgmap v2177: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:45 vm03.local ceph-mon[50983]: pgmap v2177: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:47.029 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:47.029 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:47.054 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:47.055 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:47 vm00.local ceph-mon[49980]: pgmap v2178: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:47 vm03.local ceph-mon[50983]: pgmap v2178: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:49 vm00.local ceph-mon[49980]: pgmap v2179: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:49 vm03.local ceph-mon[50983]: pgmap v2179: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:51 vm00.local ceph-mon[49980]: pgmap v2180: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:51 vm03.local ceph-mon[50983]: pgmap v2180: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:52.056 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:52.057 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:52.086 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:52.087 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:53 vm00.local ceph-mon[49980]: pgmap v2181: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:53 vm03.local ceph-mon[50983]: pgmap v2181: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:55 vm00.local ceph-mon[49980]: pgmap v2182: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:55 vm03.local ceph-mon[50983]: pgmap v2182: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:57.088 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:27:57.088 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:27:57.115 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:27:57.116 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:27:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:57 vm00.local ceph-mon[49980]: pgmap v2183: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:57 vm03.local ceph-mon[50983]: pgmap v2183: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:27:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:27:59 vm00.local ceph-mon[49980]: pgmap v2184: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:27:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:27:59 vm03.local ceph-mon[50983]: pgmap v2184: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:01 vm00.local ceph-mon[49980]: pgmap v2185: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:01 vm03.local ceph-mon[50983]: pgmap v2185: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:02.117 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:02.117 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:02.144 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:02.145 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:03 vm00.local ceph-mon[49980]: pgmap v2186: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:03 vm03.local ceph-mon[50983]: pgmap v2186: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:05 vm00.local ceph-mon[49980]: pgmap v2187: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:05 vm03.local ceph-mon[50983]: pgmap v2187: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:07.146 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:07.146 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:07.596 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:07.596 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:08.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:07 vm00.local ceph-mon[49980]: pgmap v2188: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:08.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:28:08.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:28:08.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:28:08.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:07 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:28:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: pgmap v2188: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:28:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:28:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:28:08.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:28:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:08 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:28:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:08 vm00.local ceph-mon[49980]: pgmap v2189: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:08 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:28:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:08 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:28:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:28:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: pgmap v2189: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:28:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:28:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:10 vm00.local ceph-mon[49980]: pgmap v2190: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:10 vm03.local ceph-mon[50983]: pgmap v2190: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:12.598 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:12.598 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:12.625 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:12.625 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:13 vm00.local ceph-mon[49980]: pgmap v2191: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:13 vm03.local ceph-mon[50983]: pgmap v2191: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:15 vm00.local ceph-mon[49980]: pgmap v2192: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:15 vm03.local ceph-mon[50983]: pgmap v2192: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:17.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:16 vm00.local ceph-mon[49980]: pgmap v2193: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:28:17.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:16 vm03.local ceph-mon[50983]: pgmap v2193: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:28:17.627 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:17.627 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:17.654 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:17.655 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:19 vm00.local ceph-mon[49980]: pgmap v2194: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:19 vm03.local ceph-mon[50983]: pgmap v2194: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:21 vm00.local ceph-mon[49980]: pgmap v2195: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:28:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:21 vm03.local ceph-mon[50983]: pgmap v2195: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:28:22.656 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:22.656 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:22.682 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:22.682 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:23 vm03.local ceph-mon[50983]: pgmap v2196: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:28:23.850 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:23 vm00.local ceph-mon[49980]: pgmap v2196: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:28:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:25 vm03.local ceph-mon[50983]: pgmap v2197: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:26.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:25 vm00.local ceph-mon[49980]: pgmap v2197: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:27.684 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:27.684 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:27.717 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:27.717 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:27 vm03.local ceph-mon[50983]: pgmap v2198: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:27 vm00.local ceph-mon[49980]: pgmap v2198: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:28.972 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:28 vm00.local ceph-mon[49980]: pgmap v2199: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:28 vm03.local ceph-mon[50983]: pgmap v2199: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:31 vm00.local ceph-mon[49980]: pgmap v2200: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:31 vm03.local ceph-mon[50983]: pgmap v2200: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:32.718 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:32.719 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:32.746 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:32.746 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:33 vm00.local ceph-mon[49980]: pgmap v2201: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:33 vm03.local ceph-mon[50983]: pgmap v2201: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:35 vm00.local ceph-mon[49980]: pgmap v2202: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:28:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:28:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:35 vm03.local ceph-mon[50983]: pgmap v2202: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:28:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:28:37.747 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:37.748 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:37.775 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:37.775 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:37 vm00.local ceph-mon[49980]: pgmap v2203: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:37 vm03.local ceph-mon[50983]: pgmap v2203: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:39 vm00.local ceph-mon[49980]: pgmap v2204: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:39.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:39 vm03.local ceph-mon[50983]: pgmap v2204: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:41.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:41 vm00.local ceph-mon[49980]: pgmap v2205: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:41 vm03.local ceph-mon[50983]: pgmap v2205: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:42.776 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:42.777 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:42.802 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:42.803 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:43 vm00.local ceph-mon[49980]: pgmap v2206: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
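
By this point the mount has been failing for several minutes with no detail beyond `mount system call failed`, while the cluster stays healthy. A hypothetical first-pass triage from the client host, none of it part of this job's task list (with `foo` standing in for the test's NFS cluster id):

    ceph nfs cluster info foo            # where are the ganesha daemons listening?
    ceph nfs export ls foo --detailed    # does the /fake export still exist server-side?
    mount -t nfs -v vm00.local:/fake /mnt/foo -o sync   # verbose mount attempt
    dmesg | tail                         # kernel-side NFS client errors
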
2026-03-10T06:28:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:43 vm03.local ceph-mon[50983]: pgmap v2206: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:45 vm00.local ceph-mon[49980]: pgmap v2207: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:45 vm03.local ceph-mon[50983]: pgmap v2207: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:47 vm00.local ceph-mon[49980]: pgmap v2208: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:47.804 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:47.805 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:47 vm03.local ceph-mon[50983]: pgmap v2208: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:47.830 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:47.831 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:49 vm00.local ceph-mon[49980]: pgmap v2209: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:49 vm03.local ceph-mon[50983]: pgmap v2209: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:51 vm00.local ceph-mon[49980]: pgmap v2210: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:51 vm03.local ceph-mon[50983]: pgmap v2210: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:52.832 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:52.833 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:52.858 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:52.858 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:53 vm00.local ceph-mon[49980]: pgmap v2211: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:53 vm03.local ceph-mon[50983]: pgmap v2211: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:55 vm00.local ceph-mon[49980]: pgmap v2212: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:55 vm03.local ceph-mon[50983]: pgmap v2212: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:57 vm00.local ceph-mon[49980]: pgmap v2213: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:57 vm03.local ceph-mon[50983]: pgmap v2213: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:28:57.859 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:28:57.860 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:28:57.887 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:28:57.887 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:28:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:28:59 vm00.local ceph-mon[49980]: pgmap v2214: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:28:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:28:59 vm03.local ceph-mon[50983]: pgmap v2214: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:01 vm00.local ceph-mon[49980]: pgmap v2215: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:01 vm03.local ceph-mon[50983]: pgmap v2215: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:02.889 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:02.889 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:02.915 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:02.915 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:03 vm03.local ceph-mon[50983]: pgmap v2216: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:03.854 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:03 vm00.local ceph-mon[49980]: pgmap v2216: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:05 vm03.local ceph-mon[50983]: pgmap v2217: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:06.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:05 vm00.local ceph-mon[49980]: pgmap v2217: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:07 vm03.local ceph-mon[50983]: pgmap v2218: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:07.916 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:07.917 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:07.943 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:07.943 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:07 vm00.local ceph-mon[49980]: pgmap v2218: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:08.759 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:29:08.759 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:29:08.759 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:08 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:29:08.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:08 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:29:08.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:08 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:29:08.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:08 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:29:09.740 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:09 vm00.local ceph-mon[49980]: pgmap v2219: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:09.740 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:09 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:09.740 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:09 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:29:09.740 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:09 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:09 vm03.local ceph-mon[50983]: pgmap v2219: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:09 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:09 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:29:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:09 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:10 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:10 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:11.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:10 vm00.local ceph-mon[49980]: pgmap v2220: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:10 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:10 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:29:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:10 vm03.local ceph-mon[50983]: pgmap v2220: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:12.945 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:12.945 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:12.972 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:12.972 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:13.728 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:13 vm00.local ceph-mon[49980]: pgmap v2221: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:13 vm03.local ceph-mon[50983]: pgmap v2221: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:15 vm00.local ceph-mon[49980]: pgmap v2222: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:15 vm03.local ceph-mon[50983]: pgmap v2222: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:17 vm00.local ceph-mon[49980]: pgmap v2223: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:17 vm03.local ceph-mon[50983]: pgmap v2223: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:17.974 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:17.975 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:18.001 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:18.001 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:19.713 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:19 vm03.local ceph-mon[50983]: pgmap v2224: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:19 vm00.local ceph-mon[49980]: pgmap v2224: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:21 vm00.local ceph-mon[49980]: pgmap v2225: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:21 vm03.local ceph-mon[50983]: pgmap v2225: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:23.003 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:23.003 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:23.028 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:23.029 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:23 vm00.local ceph-mon[49980]: pgmap v2226: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:23 vm03.local ceph-mon[50983]: pgmap v2226: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:25 vm00.local ceph-mon[49980]: pgmap v2227: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:25 vm03.local ceph-mon[50983]: pgmap v2227: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:27 vm00.local ceph-mon[49980]: pgmap v2228: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:27 vm03.local ceph-mon[50983]: pgmap v2228: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:28.030 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:28.031 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:28.056 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:28.056 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:29 vm00.local ceph-mon[49980]: pgmap v2229: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:29 vm03.local ceph-mon[50983]: pgmap v2229: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:31 vm00.local ceph-mon[49980]: pgmap v2230: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:31 vm03.local ceph-mon[50983]: pgmap v2230: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:33.057 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:33.058 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:33.155 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:33.156 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:33 vm00.local ceph-mon[49980]: pgmap v2231: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:33 vm03.local ceph-mon[50983]: pgmap v2231: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:35 vm00.local ceph-mon[49980]: pgmap v2232: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:29:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:29:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:35 vm03.local ceph-mon[50983]: pgmap v2232: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:29:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:29:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:37 vm00.local ceph-mon[49980]: pgmap v2233: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:37 vm03.local ceph-mon[50983]: pgmap v2233: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:38.157 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:38.158 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:38.183 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:38.184 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:39.609 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:39 vm03.local ceph-mon[50983]: pgmap v2234: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:39.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:39 vm00.local ceph-mon[49980]: pgmap v2234: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:41 vm03.local ceph-mon[50983]: pgmap v2235: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:41 vm00.local ceph-mon[49980]: pgmap v2235: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:43.185 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:43.186 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:43.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:42 vm00.local ceph-mon[49980]: pgmap v2236: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:43 vm03.local ceph-mon[50983]: pgmap v2236: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:43.533 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:43.533 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:45 vm00.local ceph-mon[49980]: pgmap v2237: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:45 vm03.local ceph-mon[50983]: pgmap v2237: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:47 vm00.local ceph-mon[49980]: pgmap v2238: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:47 vm03.local ceph-mon[50983]: pgmap v2238: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:29:48.535 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:29:48.536 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:29:48.563 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:29:48.563 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:29:48.979 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:48 vm00.local ceph-mon[49980]: pgmap v2239: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:29:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:48 vm03.local ceph-mon[50983]: pgmap v2239: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
vm03.local ceph-mon[50983]: pgmap v2239: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:29:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:51 vm00.local ceph-mon[49980]: pgmap v2240: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:29:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:51 vm03.local ceph-mon[50983]: pgmap v2240: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:29:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:53 vm00.local ceph-mon[49980]: pgmap v2241: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:29:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:53 vm03.local ceph-mon[50983]: pgmap v2241: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:29:53.564 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:29:53.565 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:29:53.591 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:29:53.592 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:29:56.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:55 vm00.local ceph-mon[49980]: pgmap v2242: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:29:56.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:55 vm03.local ceph-mon[50983]: pgmap v2242: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:29:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:57 vm03.local ceph-mon[50983]: pgmap v2243: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:29:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:57 vm00.local ceph-mon[49980]: pgmap v2243: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:29:58.593 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:29:58.593 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:29:58.619 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:29:58.619 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:29:58.980 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:29:58 vm00.local ceph-mon[49980]: pgmap v2244: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:29:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:29:58 vm03.local ceph-mon[50983]: pgmap v2244: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:30:00.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:30:00 vm03.local ceph-mon[50983]: overall HEALTH_OK 2026-03-10T06:30:00.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:30:00 vm00.local ceph-mon[49980]: overall HEALTH_OK 2026-03-10T06:30:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:30:01 vm03.local 
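Note the contrast: both mons report `overall HEALTH_OK` while the client's `mount.nfs` call keeps failing, so whatever is wrong sits in the NFS (ganesha) service path rather than in core cluster health. A plausible triage sequence from the admin host, sketched with standard Ceph and shell commands (illustrative only, not steps taken by this job):

    ceph nfs cluster ls                 # is the "foo" cluster still defined?
    ceph nfs export ls foo --detailed   # does the /fake export still exist?
    ceph orch ps --daemon_type nfs      # are the nfs.foo daemons up after the upgrade?
    nc -zv vm00.local 2049              # is anything listening on the NFS port?

If the daemons are up and the export exists, the next suspects are the NFSv4 grace period after a ganesha restart and version or option mismatches on the mount itself.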
[... pgmap v2245-v2249 heartbeats and failed mount attempts at 06:30:03 and 06:30:08 condensed ...]
2026-03-10T06:30:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:30:09 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:30:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:30:09 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:30:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:30:09 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same three dispatches mirrored on vm00; pgmap v2250-v2258 heartbeats and failed mount attempts at 06:30:13, 06:30:18, 06:30:23 and 06:30:28 condensed ...]
[... pgmap v2259-v2279 heartbeats; the rbd_support config rm pair repeating at 06:30:36 on both mons; failed mount attempts at 06:30:33, 06:30:38, 06:30:43, 06:30:48, 06:30:53, 06:30:58, 06:31:03 and 06:31:09, each ending in "mount.nfs: mount system call failed" followed by "+ sleep 5", condensed ...]
"config dump", "format": "json"}]: dispatch 2026-03-10T06:31:10.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:10 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:31:10.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:10 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:31:10.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:10 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:31:10.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:10 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:31:10.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:10 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:31:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:11 vm00.local ceph-mon[49980]: pgmap v2280: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:31:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:31:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:11 vm03.local ceph-mon[50983]: pgmap v2280: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:31:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:31:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:13 vm00.local ceph-mon[49980]: pgmap v2281: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:13 vm03.local ceph-mon[50983]: pgmap v2281: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:14.030 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:14.030 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:14.057 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:14.058 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:15 vm00.local ceph-mon[49980]: pgmap v2282: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:15.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:15 vm03.local ceph-mon[50983]: pgmap v2282: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:17 vm00.local ceph-mon[49980]: pgmap v2283: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:17 vm03.local ceph-mon[50983]: pgmap v2283: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:18.986 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:18 vm00.local ceph-mon[49980]: pgmap v2284: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:18 vm03.local ceph-mon[50983]: pgmap v2284: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:19.059 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:19.060 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:19.088 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:19.088 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:21.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:21 vm03.local ceph-mon[50983]: pgmap v2285: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:21 vm00.local ceph-mon[49980]: pgmap v2285: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:23 vm03.local ceph-mon[50983]: pgmap v2286: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:23 vm00.local ceph-mon[49980]: pgmap v2286: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:24.090 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:24.090 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:24.116 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:24.117 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:25 vm03.local ceph-mon[50983]: pgmap v2287: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:25 vm00.local ceph-mon[49980]: pgmap v2287: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:27 vm00.local ceph-mon[49980]: pgmap v2288: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:27.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:27 vm03.local ceph-mon[50983]: pgmap v2288: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:28.987 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:28 vm00.local ceph-mon[49980]: pgmap v2289: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:28 vm03.local ceph-mon[50983]: pgmap v2289: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:29.118 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:29.119 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:29.147 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:29.147 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:31 vm03.local ceph-mon[50983]: pgmap v2290: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:31 vm00.local ceph-mon[49980]: pgmap v2290: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:33 vm03.local ceph-mon[50983]: pgmap v2291: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:33 vm00.local ceph-mon[49980]: pgmap v2291: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:34.148 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:34.149 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:34.179 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:34.179 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:35 vm03.local ceph-mon[50983]: pgmap v2292: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:35 vm00.local ceph-mon[49980]: pgmap v2292: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:31:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:31:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:31:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:31:37.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:37 vm00.local ceph-mon[49980]: pgmap v2293: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:37 vm03.local ceph-mon[50983]: pgmap v2293: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:38.988 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:38 vm00.local ceph-mon[49980]: pgmap v2294: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:38 vm03.local ceph-mon[50983]: pgmap v2294: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:39.181 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:39.181 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:39.209 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:39.210 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:41 vm03.local ceph-mon[50983]: pgmap v2295: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:41 vm00.local ceph-mon[49980]: pgmap v2295: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:43.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:43 vm03.local ceph-mon[50983]: pgmap v2296: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:43.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:43 vm00.local ceph-mon[49980]: pgmap v2296: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:31:44.211 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:31:44.212 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:31:44.238 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:31:44.238 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:31:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:31:45 vm03.local ceph-mon[50983]: pgmap v2297: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:31:45 vm00.local ceph-mon[49980]: pgmap v2297: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:31:47.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 
[... pgmap v2298-v2309 heartbeats and failed mount attempts at 06:31:49, 06:31:54, 06:31:59, 06:32:04 and 06:32:09 condensed ...]
2026-03-10T06:32:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:32:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:32:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same three dispatches mirrored on vm00; pgmap v2310-v2311 heartbeats condensed ...]
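The trio of mon dispatches above (`config dump`, `config generate-minimal-conf`, `auth get client.admin`) recurs roughly once a minute throughout this window, which is consistent with the cephadm mgr module periodically refreshing the minimal ceph.conf and admin keyring it distributes to managed hosts; it is background noise here, not part of the failure. To quantify the stall from the full log, a few greps suffice (hypothetical helper commands; the filename teuthology.log is assumed):

    grep -c '+ mount -t nfs' teuthology.log                # retry-loop iterations
    grep -c 'mount system call failed' teuthology.log      # failed attempts
    grep -c 'config generate-minimal-conf' teuthology.log  # mgr refresh cycles

In this excerpt alone the loop has already failed roughly forty times over about three minutes with no change in the error.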
[... pgmap v2312-v2322 heartbeats and failed mount attempts at 06:32:14, 06:32:19, 06:32:24, 06:32:29 and 06:32:34 condensed ...]
2026-03-10T06:32:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:32:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:36 vm00.local
ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:32:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:32:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:32:37.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:37 vm00.local ceph-mon[49980]: pgmap v2323: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:37 vm03.local ceph-mon[50983]: pgmap v2323: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:38.993 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:38 vm00.local ceph-mon[49980]: pgmap v2324: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:38 vm03.local ceph-mon[50983]: pgmap v2324: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:39.523 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:32:39.524 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:32:39.550 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:32:39.550 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:32:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:41 vm03.local ceph-mon[50983]: pgmap v2325: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:41 vm00.local ceph-mon[49980]: pgmap v2325: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:43.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:43 vm00.local ceph-mon[49980]: pgmap v2326: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:43 vm03.local ceph-mon[50983]: pgmap v2326: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:44.552 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:32:44.552 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:32:44.607 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:32:44.607 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:32:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:45 vm00.local ceph-mon[49980]: pgmap v2327: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB 
/ 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:45 vm03.local ceph-mon[50983]: pgmap v2327: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:47 vm00.local ceph-mon[49980]: pgmap v2328: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:47 vm03.local ceph-mon[50983]: pgmap v2328: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:48.993 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:48 vm00.local ceph-mon[49980]: pgmap v2329: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:48 vm03.local ceph-mon[50983]: pgmap v2329: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:49.609 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:32:49.609 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:32:49.635 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:32:49.635 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:32:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:51 vm00.local ceph-mon[49980]: pgmap v2330: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:51 vm03.local ceph-mon[50983]: pgmap v2330: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:53 vm00.local ceph-mon[49980]: pgmap v2331: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:53 vm03.local ceph-mon[50983]: pgmap v2331: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:54.636 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:32:54.637 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:32:54.662 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:32:54.662 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:32:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:55 vm00.local ceph-mon[49980]: pgmap v2332: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:55 vm03.local ceph-mon[50983]: pgmap v2332: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:57 vm00.local ceph-mon[49980]: pgmap v2333: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 
0 op/s 2026-03-10T06:32:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:57 vm03.local ceph-mon[50983]: pgmap v2333: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:32:58.994 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:32:58 vm00.local ceph-mon[49980]: pgmap v2334: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:32:58 vm03.local ceph-mon[50983]: pgmap v2334: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:32:59.664 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:32:59.664 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:32:59.690 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:32:59.691 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:01 vm00.local ceph-mon[49980]: pgmap v2335: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:01 vm03.local ceph-mon[50983]: pgmap v2335: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:03 vm00.local ceph-mon[49980]: pgmap v2336: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:03 vm03.local ceph-mon[50983]: pgmap v2336: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:04.692 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:04.692 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:04.717 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:04.718 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:05 vm00.local ceph-mon[49980]: pgmap v2337: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:05 vm03.local ceph-mon[50983]: pgmap v2337: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:07 vm00.local ceph-mon[49980]: pgmap v2338: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:07 vm03.local ceph-mon[50983]: pgmap v2338: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:08.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:08 vm00.local ceph-mon[49980]: pgmap v2339: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:09.056 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:08 vm03.local ceph-mon[50983]: pgmap v2339: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:09.719 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:09.719 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:09.746 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:09.746 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:11 vm00.local ceph-mon[49980]: pgmap v2340: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:33:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:33:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:33:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:33:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:33:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:11 vm03.local ceph-mon[50983]: pgmap v2340: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:33:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:33:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:33:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:33:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:33:13.333 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:13 vm00.local ceph-mon[49980]: pgmap v2341: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:13 vm03.local 
ceph-mon[50983]: pgmap v2341: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:14.748 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:14.748 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:14.775 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:14.775 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:15 vm00.local ceph-mon[49980]: pgmap v2342: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:15 vm03.local ceph-mon[50983]: pgmap v2342: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:17 vm00.local ceph-mon[49980]: pgmap v2343: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:17 vm03.local ceph-mon[50983]: pgmap v2343: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:18.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:18 vm00.local ceph-mon[49980]: pgmap v2344: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:18 vm03.local ceph-mon[50983]: pgmap v2344: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:19.777 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:19.777 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:19.802 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:19.803 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:21 vm00.local ceph-mon[49980]: pgmap v2345: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:21 vm03.local ceph-mon[50983]: pgmap v2345: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:23 vm00.local ceph-mon[49980]: pgmap v2346: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:23 vm03.local ceph-mon[50983]: pgmap v2346: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:24.804 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:24.804 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:24.829 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:24.830 INFO:teuthology.orchestra.run.vm00.stderr:+ 
sleep 5 2026-03-10T06:33:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:25 vm00.local ceph-mon[49980]: pgmap v2347: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:25 vm03.local ceph-mon[50983]: pgmap v2347: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:27 vm00.local ceph-mon[49980]: pgmap v2348: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:27 vm03.local ceph-mon[50983]: pgmap v2348: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:28.996 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:28 vm00.local ceph-mon[49980]: pgmap v2349: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:28 vm03.local ceph-mon[50983]: pgmap v2349: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:29.831 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:29.832 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:29.856 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:29.857 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:31 vm00.local ceph-mon[49980]: pgmap v2350: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:31 vm03.local ceph-mon[50983]: pgmap v2350: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:33.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:33 vm00.local ceph-mon[49980]: pgmap v2351: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:33 vm03.local ceph-mon[50983]: pgmap v2351: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:34.858 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:34.859 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:34.884 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:34.885 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:35 vm00.local ceph-mon[49980]: pgmap v2352: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:35 vm03.local ceph-mon[50983]: pgmap v2352: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:36.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:33:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:33:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:33:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:33:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:37 vm00.local ceph-mon[49980]: pgmap v2353: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:37 vm03.local ceph-mon[50983]: pgmap v2353: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:38.997 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:38 vm00.local ceph-mon[49980]: pgmap v2354: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:38 vm03.local ceph-mon[50983]: pgmap v2354: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:39.886 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:39.886 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:39.912 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:39.912 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:41 vm00.local ceph-mon[49980]: pgmap v2355: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:41 vm03.local ceph-mon[50983]: pgmap v2355: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:43.333 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:43 vm00.local ceph-mon[49980]: pgmap v2356: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:43 vm03.local ceph-mon[50983]: pgmap v2356: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:44.914 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:44.914 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o 
sync 2026-03-10T06:33:44.940 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:44.941 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:45 vm00.local ceph-mon[49980]: pgmap v2357: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:45 vm03.local ceph-mon[50983]: pgmap v2357: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:47 vm00.local ceph-mon[49980]: pgmap v2358: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:47 vm03.local ceph-mon[50983]: pgmap v2358: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:48.998 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:48 vm00.local ceph-mon[49980]: pgmap v2359: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:48 vm03.local ceph-mon[50983]: pgmap v2359: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:49.942 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:49.942 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:49.968 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:49.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:51 vm00.local ceph-mon[49980]: pgmap v2360: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:51 vm03.local ceph-mon[50983]: pgmap v2360: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:53.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:53 vm00.local ceph-mon[49980]: pgmap v2361: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:53 vm03.local ceph-mon[50983]: pgmap v2361: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:54.969 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:54.970 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:33:54.997 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:33:54.997 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:33:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:55 vm00.local ceph-mon[49980]: pgmap v2362: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:55 
vm03.local ceph-mon[50983]: pgmap v2362: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:57 vm00.local ceph-mon[49980]: pgmap v2363: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:57 vm03.local ceph-mon[50983]: pgmap v2363: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:33:58.998 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:33:58 vm00.local ceph-mon[49980]: pgmap v2364: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:33:58 vm03.local ceph-mon[50983]: pgmap v2364: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:33:59.999 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:33:59.999 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:00.024 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:00.025 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:01 vm00.local ceph-mon[49980]: pgmap v2365: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:01 vm03.local ceph-mon[50983]: pgmap v2365: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:03 vm00.local ceph-mon[49980]: pgmap v2366: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:03 vm03.local ceph-mon[50983]: pgmap v2366: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:05.026 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:05.026 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:05.052 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:05.052 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:05 vm00.local ceph-mon[49980]: pgmap v2367: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:05 vm03.local ceph-mon[50983]: pgmap v2367: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:07 vm00.local ceph-mon[49980]: pgmap v2368: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:07 vm03.local ceph-mon[50983]: pgmap v2368: 
97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:09.000 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:08 vm00.local ceph-mon[49980]: pgmap v2369: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:08 vm03.local ceph-mon[50983]: pgmap v2369: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:10.054 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:10.054 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:10.081 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:10.081 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:11 vm00.local ceph-mon[49980]: pgmap v2370: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:34:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:34:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:34:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:34:11.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:11 vm03.local ceph-mon[50983]: pgmap v2370: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:34:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:11 vm03.local ceph-mon[50983]: from='mgr.14214 
192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:34:13.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:13 vm00.local ceph-mon[49980]: pgmap v2371: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:13 vm03.local ceph-mon[50983]: pgmap v2371: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:15.082 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:15.083 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:15.109 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:15.110 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:15 vm00.local ceph-mon[49980]: pgmap v2372: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:15 vm03.local ceph-mon[50983]: pgmap v2372: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:17 vm00.local ceph-mon[49980]: pgmap v2373: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:17 vm03.local ceph-mon[50983]: pgmap v2373: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:19.000 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:18 vm00.local ceph-mon[49980]: pgmap v2374: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:18 vm03.local ceph-mon[50983]: pgmap v2374: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:20.111 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:20.111 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:20.139 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:20.139 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:21 vm00.local ceph-mon[49980]: pgmap v2375: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:21 vm03.local ceph-mon[50983]: pgmap v2375: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:23 vm00.local ceph-mon[49980]: pgmap v2376: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:23 vm03.local ceph-mon[50983]: pgmap v2376: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 
B/s wr, 0 op/s 2026-03-10T06:34:25.140 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:25.141 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:25.167 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:25.167 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:25 vm00.local ceph-mon[49980]: pgmap v2377: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:25 vm03.local ceph-mon[50983]: pgmap v2377: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:27 vm00.local ceph-mon[49980]: pgmap v2378: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:27 vm03.local ceph-mon[50983]: pgmap v2378: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:29.000 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:28 vm00.local ceph-mon[49980]: pgmap v2379: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:28 vm03.local ceph-mon[50983]: pgmap v2379: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:30.168 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:30.169 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:30.195 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:30.195 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:31 vm00.local ceph-mon[49980]: pgmap v2380: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:31 vm03.local ceph-mon[50983]: pgmap v2380: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:33 vm00.local ceph-mon[49980]: pgmap v2381: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:33 vm03.local ceph-mon[50983]: pgmap v2381: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:35.197 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:35.197 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:35.223 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:35.223 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:35 vm00.local ceph-mon[49980]: pgmap 
v2382: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:35 vm03.local ceph-mon[50983]: pgmap v2382: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:34:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:34:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:34:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:34:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:37 vm00.local ceph-mon[49980]: pgmap v2383: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:37 vm03.local ceph-mon[50983]: pgmap v2383: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:39.000 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:38 vm00.local ceph-mon[49980]: pgmap v2384: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:38 vm03.local ceph-mon[50983]: pgmap v2384: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:40.224 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:40.225 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:40.250 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:40.250 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:41 vm00.local ceph-mon[49980]: pgmap v2385: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:41 vm03.local ceph-mon[50983]: pgmap v2385: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:43.333 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:43 vm00.local ceph-mon[49980]: pgmap v2386: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:43.556 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:43 vm03.local ceph-mon[50983]: pgmap v2386: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:45.252 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:45.252 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:45.278 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:45.278 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:45 vm00.local ceph-mon[49980]: pgmap v2387: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:45 vm03.local ceph-mon[50983]: pgmap v2387: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:47 vm00.local ceph-mon[49980]: pgmap v2388: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:47 vm03.local ceph-mon[50983]: pgmap v2388: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:49.000 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:48 vm00.local ceph-mon[49980]: pgmap v2389: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:48 vm03.local ceph-mon[50983]: pgmap v2389: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:34:50.279 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:50.280 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:50.306 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:34:50.306 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:34:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:51 vm00.local ceph-mon[49980]: pgmap v2390: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:51 vm03.local ceph-mon[50983]: pgmap v2390: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:53.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:34:53 vm00.local ceph-mon[49980]: pgmap v2391: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:34:53 vm03.local ceph-mon[50983]: pgmap v2391: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:34:55.308 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:34:55.308 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:34:55.335 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-10T06:34:55.335 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[journalctl@ceph.mon.vm00 and journalctl@ceph.mon.vm03 emit matching pgmap heartbeats (v2392 through v2490) every ~2 s throughout this excerpt, all reporting: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170-341 B/s rd, 170-255 B/s wr, 0 op/s]
2026-03-10T06:35:00.337 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:35:00.337 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:35:00.367 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:35:00.368 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[the same four stderr lines repeat every 5 s, every attempt failing with "mount.nfs: mount system call failed", from 06:35:05 through the last attempt in this excerpt at 06:38:11]
2026-03-10T06:35:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:35:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:35:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:11 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:35:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:35:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:35:11.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:11 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:35:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:12 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:35:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:12 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:35:12.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:12 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:35:12.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:12 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:35:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:35:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:35:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:35:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:35:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:35:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[the same mgr cycle recurs on both mons: the config dump / config generate-minimal-conf / auth get client.admin dispatches (with their follow-up records) at 06:36:12 and 06:37:12, and the two mgr/rbd_support config rm dispatches at 06:36:36 and 06:37:36]
2026-03-10T06:38:12.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:12 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709'
entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:38:12.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:12 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:38:12.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:12 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:38:12.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:12 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:38:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:12 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:38:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:12 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:38:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:12 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:38:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:12 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:38:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:13 vm00.local ceph-mon[49980]: pgmap v2491: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:13 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:38:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:13 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:38:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:13 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:38:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:13 vm03.local ceph-mon[50983]: pgmap v2491: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:13 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:38:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:13 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:38:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:13 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:38:15.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:15 vm00.local ceph-mon[49980]: pgmap v2492: 97 pgs: 97 active+clean; 453 KiB 
data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:15 vm03.local ceph-mon[50983]: pgmap v2492: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:16.922 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:16.923 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:17.002 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:17.002 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:17 vm00.local ceph-mon[49980]: pgmap v2493: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:17 vm03.local ceph-mon[50983]: pgmap v2493: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:19.015 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:18 vm00.local ceph-mon[49980]: pgmap v2494: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:18 vm03.local ceph-mon[50983]: pgmap v2494: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:21 vm00.local ceph-mon[49980]: pgmap v2495: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:21 vm03.local ceph-mon[50983]: pgmap v2495: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:22.004 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:22.004 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:22.030 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:22.031 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:23 vm00.local ceph-mon[49980]: pgmap v2496: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:23 vm03.local ceph-mon[50983]: pgmap v2496: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:25 vm00.local ceph-mon[49980]: pgmap v2497: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:25 vm03.local ceph-mon[50983]: pgmap v2497: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:27.032 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:27.033 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-10T06:38:27.059 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:27.060 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:27 vm00.local ceph-mon[49980]: pgmap v2498: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:27 vm03.local ceph-mon[50983]: pgmap v2498: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:29.015 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:28 vm00.local ceph-mon[49980]: pgmap v2499: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:28 vm03.local ceph-mon[50983]: pgmap v2499: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:31 vm00.local ceph-mon[49980]: pgmap v2500: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:31 vm03.local ceph-mon[50983]: pgmap v2500: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:32.061 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:32.062 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:32.088 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:32.089 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:33 vm00.local ceph-mon[49980]: pgmap v2501: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:33 vm03.local ceph-mon[50983]: pgmap v2501: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:35 vm00.local ceph-mon[49980]: pgmap v2502: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:35 vm03.local ceph-mon[50983]: pgmap v2502: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:38:36.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:38:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:36 vm03.local ceph-mon[50983]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:38:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:38:37.090 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:37.091 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:37.117 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:37.118 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:37 vm00.local ceph-mon[49980]: pgmap v2503: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:37 vm03.local ceph-mon[50983]: pgmap v2503: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:39.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:38 vm00.local ceph-mon[49980]: pgmap v2504: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:38 vm03.local ceph-mon[50983]: pgmap v2504: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:41 vm00.local ceph-mon[49980]: pgmap v2505: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:41 vm03.local ceph-mon[50983]: pgmap v2505: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:42.120 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:42.120 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:42.145 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:42.146 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:43 vm00.local ceph-mon[49980]: pgmap v2506: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:43 vm03.local ceph-mon[50983]: pgmap v2506: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:45.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:45 vm00.local ceph-mon[49980]: pgmap v2507: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:45 vm03.local ceph-mon[50983]: pgmap v2507: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:47.147 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:47.147 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:47.179 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:47.180 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:47 vm00.local ceph-mon[49980]: pgmap v2508: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:47 vm03.local ceph-mon[50983]: pgmap v2508: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:49.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:48 vm00.local ceph-mon[49980]: pgmap v2509: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:49.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:48 vm03.local ceph-mon[50983]: pgmap v2509: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:51 vm00.local ceph-mon[49980]: pgmap v2510: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:51 vm03.local ceph-mon[50983]: pgmap v2510: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:52.181 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:52.181 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:52.209 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:52.209 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:53 vm00.local ceph-mon[49980]: pgmap v2511: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:53 vm03.local ceph-mon[50983]: pgmap v2511: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:55.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:55 vm00.local ceph-mon[49980]: pgmap v2512: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:55 vm03.local ceph-mon[50983]: pgmap v2512: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:57.211 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:38:57.211 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:38:57.247 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:38:57.247 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:38:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:57 vm00.local ceph-mon[49980]: pgmap v2513: 97 pgs: 97 active+clean; 453 KiB 
data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:57 vm03.local ceph-mon[50983]: pgmap v2513: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:38:59.017 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:38:58 vm00.local ceph-mon[49980]: pgmap v2514: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:38:59.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:38:58 vm03.local ceph-mon[50983]: pgmap v2514: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:01 vm00.local ceph-mon[49980]: pgmap v2515: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:01 vm03.local ceph-mon[50983]: pgmap v2515: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:02.249 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:02.249 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:02.276 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:02.277 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:03 vm00.local ceph-mon[49980]: pgmap v2516: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:03 vm03.local ceph-mon[50983]: pgmap v2516: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:05.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:05 vm00.local ceph-mon[49980]: pgmap v2517: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:05 vm03.local ceph-mon[50983]: pgmap v2517: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:07.278 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:07.279 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:07.306 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:07.306 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:07 vm00.local ceph-mon[49980]: pgmap v2518: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:07 vm03.local ceph-mon[50983]: pgmap v2518: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:08 vm00.local ceph-mon[49980]: pgmap v2519: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB 
avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:08 vm03.local ceph-mon[50983]: pgmap v2519: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:11 vm00.local ceph-mon[49980]: pgmap v2520: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:11 vm03.local ceph-mon[50983]: pgmap v2520: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:12.308 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:12.308 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:12.344 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:12.344 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:13.213 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:13 vm03.local ceph-mon[50983]: pgmap v2521: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:13.213 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:13 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:39:13.213 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:13 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:39:13.213 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:13 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:39:13.512 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:13 vm00.local ceph-mon[49980]: pgmap v2521: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:13.512 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:13 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:39:13.512 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:13 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:39:13.512 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:13 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:39:14.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:14 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:14 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:39:14.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:14 
vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:14 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:14 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:14 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:14 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:39:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:14 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:14 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:14.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:14 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:39:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:15 vm00.local ceph-mon[49980]: pgmap v2522: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:15 vm03.local ceph-mon[50983]: pgmap v2522: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:17.345 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:17.346 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:17.379 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:17.380 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:17 vm00.local ceph-mon[49980]: pgmap v2523: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:17 vm03.local ceph-mon[50983]: pgmap v2523: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:19.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:18 vm03.local ceph-mon[50983]: pgmap v2524: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:18 vm00.local ceph-mon[49980]: pgmap v2524: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:21 vm00.local ceph-mon[49980]: pgmap v2525: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:21 vm03.local 
ceph-mon[50983]: pgmap v2525: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:22.381 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:22.382 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:22.694 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:22.694 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:23 vm00.local ceph-mon[49980]: pgmap v2526: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:23 vm03.local ceph-mon[50983]: pgmap v2526: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:25 vm00.local ceph-mon[49980]: pgmap v2527: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:25 vm03.local ceph-mon[50983]: pgmap v2527: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:27.696 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:27.696 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:27.722 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:27.723 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:27 vm00.local ceph-mon[49980]: pgmap v2528: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:39:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:27 vm03.local ceph-mon[50983]: pgmap v2528: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:39:29.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:28 vm03.local ceph-mon[50983]: pgmap v2529: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:28 vm00.local ceph-mon[49980]: pgmap v2529: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:31 vm00.local ceph-mon[49980]: pgmap v2530: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:39:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:31 vm03.local ceph-mon[50983]: pgmap v2530: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:39:32.724 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:32.724 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:32.750 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:32.751 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 
5 2026-03-10T06:39:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:33 vm00.local ceph-mon[49980]: pgmap v2531: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:39:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:33 vm03.local ceph-mon[50983]: pgmap v2531: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T06:39:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:35 vm00.local ceph-mon[49980]: pgmap v2532: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:35 vm03.local ceph-mon[50983]: pgmap v2532: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:39:36.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:39:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:39:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:39:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:37 vm00.local ceph-mon[49980]: pgmap v2533: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:37 vm03.local ceph-mon[50983]: pgmap v2533: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:37.752 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:37.753 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:37.778 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:37.778 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:39.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:38 vm03.local ceph-mon[50983]: pgmap v2534: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:38 vm00.local ceph-mon[49980]: pgmap v2534: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:41.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:41 vm00.local ceph-mon[49980]: pgmap v2535: 97 pgs: 97 active+clean; 453 KiB data, 82 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:41 vm03.local ceph-mon[50983]: pgmap v2535: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:42.780 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:42.780 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:42.954 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:42.954 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:43.346 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:43 vm00.local ceph-mon[49980]: pgmap v2536: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:43 vm03.local ceph-mon[50983]: pgmap v2536: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:45 vm00.local ceph-mon[49980]: pgmap v2537: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:45 vm03.local ceph-mon[50983]: pgmap v2537: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:47 vm00.local ceph-mon[49980]: pgmap v2538: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:47 vm03.local ceph-mon[50983]: pgmap v2538: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:47.955 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:47.956 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:47.982 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:47.983 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:48 vm00.local ceph-mon[49980]: pgmap v2539: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:48 vm03.local ceph-mon[50983]: pgmap v2539: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:51 vm00.local ceph-mon[49980]: pgmap v2540: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:51 vm03.local ceph-mon[50983]: pgmap v2540: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:52.984 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:52.985 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-10T06:39:53.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:53.012 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:53 vm00.local ceph-mon[49980]: pgmap v2541: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:53 vm03.local ceph-mon[50983]: pgmap v2541: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:55 vm00.local ceph-mon[49980]: pgmap v2542: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:55 vm03.local ceph-mon[50983]: pgmap v2542: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:57 vm00.local ceph-mon[49980]: pgmap v2543: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:57 vm03.local ceph-mon[50983]: pgmap v2543: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:39:58.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:39:58.014 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:39:58.040 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:39:58.041 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:39:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:39:58 vm00.local ceph-mon[49980]: pgmap v2544: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:39:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:39:58 vm03.local ceph-mon[50983]: pgmap v2544: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:00.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:00 vm03.local ceph-mon[50983]: overall HEALTH_OK 2026-03-10T06:40:00.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:00 vm00.local ceph-mon[49980]: overall HEALTH_OK 2026-03-10T06:40:01.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:01 vm00.local ceph-mon[49980]: pgmap v2545: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:01 vm03.local ceph-mon[50983]: pgmap v2545: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:03.042 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:03.043 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:03.069 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:03.069 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:03.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:03 vm00.local ceph-mon[49980]: pgmap v2546: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:03 vm03.local ceph-mon[50983]: pgmap v2546: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:05 vm00.local ceph-mon[49980]: pgmap v2547: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:05 vm03.local ceph-mon[50983]: pgmap v2547: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:07 vm03.local ceph-mon[50983]: pgmap v2548: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:07 vm00.local ceph-mon[49980]: pgmap v2548: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:08.071 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:08.071 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:08.096 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:08.097 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:09 vm00.local ceph-mon[49980]: pgmap v2549: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:09 vm03.local ceph-mon[50983]: pgmap v2549: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:11.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:11 vm00.local ceph-mon[49980]: pgmap v2550: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:11 vm03.local ceph-mon[50983]: pgmap v2550: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:13.098 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:13.099 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:13.125 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:13.126 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:13 vm00.local ceph-mon[49980]: pgmap v2551: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:13 vm03.local ceph-mon[50983]: pgmap v2551: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:15.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:15 vm00.local ceph-mon[49980]: pgmap v2552: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:40:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:40:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:40:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:40:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:40:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:15 vm03.local ceph-mon[50983]: pgmap v2552: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:40:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:40:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:40:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:40:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:40:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:17 vm00.local ceph-mon[49980]: pgmap v2553: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:17 vm03.local ceph-mon[50983]: pgmap v2553: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:18.127 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:18.127 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:18.154 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:18.155 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:18 vm00.local 
ceph-mon[49980]: pgmap v2554: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:18 vm03.local ceph-mon[50983]: pgmap v2554: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:21.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:21 vm00.local ceph-mon[49980]: pgmap v2555: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:21 vm03.local ceph-mon[50983]: pgmap v2555: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:23.156 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:23.157 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:23.183 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:23.184 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:23 vm00.local ceph-mon[49980]: pgmap v2556: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:23 vm03.local ceph-mon[50983]: pgmap v2556: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:25 vm00.local ceph-mon[49980]: pgmap v2557: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:25 vm03.local ceph-mon[50983]: pgmap v2557: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:27 vm00.local ceph-mon[49980]: pgmap v2558: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:27 vm03.local ceph-mon[50983]: pgmap v2558: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:28.185 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:28.186 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:28.211 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:28.212 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:28 vm00.local ceph-mon[49980]: pgmap v2559: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:28 vm03.local ceph-mon[50983]: pgmap v2559: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:31 vm00.local ceph-mon[49980]: pgmap v2560: 97 pgs: 97 
active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:31 vm03.local ceph-mon[50983]: pgmap v2560: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:33.213 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:33.213 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:33.239 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:40:33.239 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:40:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:33 vm00.local ceph-mon[49980]: pgmap v2561: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:33 vm03.local ceph-mon[50983]: pgmap v2561: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:35 vm00.local ceph-mon[49980]: pgmap v2562: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:35 vm03.local ceph-mon[50983]: pgmap v2562: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:40:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:40:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:40:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:40:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:40:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:37 vm00.local ceph-mon[49980]: pgmap v2563: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:37 vm03.local ceph-mon[50983]: pgmap v2563: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:40:38.241 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:40:38.241 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:40:38.270 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-10T06:40:38.271 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:40:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:38 vm00.local ceph-mon[49980]: pgmap v2564: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:38 vm03.local ceph-mon[50983]: pgmap v2564: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:41.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:41 vm00.local ceph-mon[49980]: pgmap v2565: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:41 vm03.local ceph-mon[50983]: pgmap v2565: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:43.272 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:40:43.272 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:40:43.299 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:40:43.300 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:40:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:43 vm00.local ceph-mon[49980]: pgmap v2566: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:43 vm03.local ceph-mon[50983]: pgmap v2566: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:45 vm00.local ceph-mon[49980]: pgmap v2567: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:45 vm03.local ceph-mon[50983]: pgmap v2567: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:47 vm00.local ceph-mon[49980]: pgmap v2568: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:47 vm03.local ceph-mon[50983]: pgmap v2568: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:48.302 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:40:48.302 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:40:48.331 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:40:48.331 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:40:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:48 vm00.local ceph-mon[49980]: pgmap v2569: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:48 vm03.local ceph-mon[50983]: pgmap v2569: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:51.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:51 vm00.local ceph-mon[49980]: pgmap v2570: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:51 vm03.local ceph-mon[50983]: pgmap v2570: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:53.332 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:40:53.333 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:40:53.359 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:40:53.359 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:40:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:53 vm00.local ceph-mon[49980]: pgmap v2571: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:53 vm03.local ceph-mon[50983]: pgmap v2571: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:55 vm00.local ceph-mon[49980]: pgmap v2572: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:55 vm03.local ceph-mon[50983]: pgmap v2572: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:57 vm00.local ceph-mon[49980]: pgmap v2573: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:57 vm03.local ceph-mon[50983]: pgmap v2573: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:40:58.360 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:40:58.361 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:40:58.389 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:40:58.389 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:40:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:40:58 vm00.local ceph-mon[49980]: pgmap v2574: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:40:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:40:58 vm03.local ceph-mon[50983]: pgmap v2574: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:01 vm00.local ceph-mon[49980]: pgmap v2575: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:01 vm03.local ceph-mon[50983]: pgmap v2575: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:03.390 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:03.391 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:03.418 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:03.418 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:03 vm00.local ceph-mon[49980]: pgmap v2576: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:03 vm03.local ceph-mon[50983]: pgmap v2576: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:05 vm00.local ceph-mon[49980]: pgmap v2577: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:05 vm03.local ceph-mon[50983]: pgmap v2577: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:07 vm00.local ceph-mon[49980]: pgmap v2578: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:07 vm03.local ceph-mon[50983]: pgmap v2578: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:08.419 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:08.420 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:08.445 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:08.446 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:08 vm00.local ceph-mon[49980]: pgmap v2579: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:08 vm03.local ceph-mon[50983]: pgmap v2579: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:11.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:11 vm00.local ceph-mon[49980]: pgmap v2580: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:11 vm03.local ceph-mon[50983]: pgmap v2580: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:13.343 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:13 vm00.local ceph-mon[49980]: pgmap v2581: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:13.447 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:13.448 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:13.474 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:13.474 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:13 vm03.local ceph-mon[50983]: pgmap v2581: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:15 vm00.local ceph-mon[49980]: pgmap v2582: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:41:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:41:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:41:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:41:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:41:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:15 vm03.local ceph-mon[50983]: pgmap v2582: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:41:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:41:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:41:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:41:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:41:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:17 vm00.local ceph-mon[49980]: pgmap v2583: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:17 vm03.local ceph-mon[50983]: pgmap v2583: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:18.476 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:18.476 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:18.504 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:18.505 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:18 vm00.local ceph-mon[49980]: pgmap v2584: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:18 vm03.local ceph-mon[50983]: pgmap v2584: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:21.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:21 vm00.local ceph-mon[49980]: pgmap v2585: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:21 vm03.local ceph-mon[50983]: pgmap v2585: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:23.506 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:23.507 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:23 vm00.local ceph-mon[49980]: pgmap v2586: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:23.535 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:23.535 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:23 vm03.local ceph-mon[50983]: pgmap v2586: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:25 vm00.local ceph-mon[49980]: pgmap v2587: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:26.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:25 vm03.local ceph-mon[50983]: pgmap v2587: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:27 vm00.local ceph-mon[49980]: pgmap v2588: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:27 vm03.local ceph-mon[50983]: pgmap v2588: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:28.537 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:28.537 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:28.564 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:28.565 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:29 vm00.local ceph-mon[49980]: pgmap v2589: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:29 vm03.local ceph-mon[50983]: pgmap v2589: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:31 vm00.local ceph-mon[49980]: pgmap v2590: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:31 vm03.local ceph-mon[50983]: pgmap v2590: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:33.567 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:33.567 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:33.595 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:33.595 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:33 vm00.local ceph-mon[49980]: pgmap v2591: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:33 vm03.local ceph-mon[50983]: pgmap v2591: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:35 vm00.local ceph-mon[49980]: pgmap v2592: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:41:35.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:41:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:35 vm03.local ceph-mon[50983]: pgmap v2592: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:41:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:41:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:37 vm00.local ceph-mon[49980]: pgmap v2593: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:37 vm03.local ceph-mon[50983]: pgmap v2593: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:38.597 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:38.597 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:38.625 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:38.626 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:38 vm00.local ceph-mon[49980]: pgmap v2594: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:38 vm03.local ceph-mon[50983]: pgmap v2594: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:41 vm00.local ceph-mon[49980]: pgmap v2595: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:41 vm03.local ceph-mon[50983]: pgmap v2595: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:43 vm00.local ceph-mon[49980]: pgmap v2596: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:43 vm03.local ceph-mon[50983]: pgmap v2596: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:43.627 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:43.627 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:43.655 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:43.656 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:45 vm00.local ceph-mon[49980]: pgmap v2597: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:45 vm03.local ceph-mon[50983]: pgmap v2597: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:47 vm00.local ceph-mon[49980]: pgmap v2598: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:47 vm03.local ceph-mon[50983]: pgmap v2598: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:48.657 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:48.658 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:48.686 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:48.686 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:48 vm00.local ceph-mon[49980]: pgmap v2599: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:48 vm03.local ceph-mon[50983]: pgmap v2599: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:51 vm00.local ceph-mon[49980]: pgmap v2600: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:51 vm03.local ceph-mon[50983]: pgmap v2600: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:53 vm00.local ceph-mon[49980]: pgmap v2601: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:53 vm03.local ceph-mon[50983]: pgmap v2601: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:53.688 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:53.688 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:53.716 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:53.716 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:55 vm00.local ceph-mon[49980]: pgmap v2602: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:55 vm03.local ceph-mon[50983]: pgmap v2602: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:57.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:57 vm00.local ceph-mon[49980]: pgmap v2603: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:57 vm03.local ceph-mon[50983]: pgmap v2603: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:41:58.718 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:41:58.718 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:41:59.300 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:41:59.300 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:41:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:41:59 vm00.local ceph-mon[49980]: pgmap v2604: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:41:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:41:59 vm03.local ceph-mon[50983]: pgmap v2604: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:01 vm00.local ceph-mon[49980]: pgmap v2605: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:01 vm03.local ceph-mon[50983]: pgmap v2605: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:03 vm00.local ceph-mon[49980]: pgmap v2606: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:03 vm03.local ceph-mon[50983]: pgmap v2606: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:04.301 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:04.302 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:04.330 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:04.331 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:05 vm00.local ceph-mon[49980]: pgmap v2607: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:05 vm03.local ceph-mon[50983]: pgmap v2607: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:07 vm00.local ceph-mon[49980]: pgmap v2608: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:07 vm03.local ceph-mon[50983]: pgmap v2608: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:08 vm00.local ceph-mon[49980]: pgmap v2609: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:08 vm03.local ceph-mon[50983]: pgmap v2609: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:09.332 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:09.332 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:09.375 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:09.376 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:11 vm00.local ceph-mon[49980]: pgmap v2610: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:11 vm03.local ceph-mon[50983]: pgmap v2610: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:13 vm00.local ceph-mon[49980]: pgmap v2611: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:13 vm03.local ceph-mon[50983]: pgmap v2611: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:14.377 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:14.378 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:14.404 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:14.404 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:15 vm00.local ceph-mon[49980]: pgmap v2612: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:42:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:42:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:15 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:42:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:15 vm03.local ceph-mon[50983]: pgmap v2612: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:42:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:42:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:15 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:42:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:42:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:42:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:42:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:42:17.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:17 vm00.local ceph-mon[49980]: pgmap v2613: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:17 vm03.local ceph-mon[50983]: pgmap v2613: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:19 vm03.local ceph-mon[50983]: pgmap v2614: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:19.406 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:19.406 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:19.432 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:19.432 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:19.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:19 vm00.local ceph-mon[49980]: pgmap v2614: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:21 vm00.local ceph-mon[49980]: pgmap v2615: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:21 vm03.local ceph-mon[50983]: pgmap v2615: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:23 vm00.local ceph-mon[49980]: pgmap v2616: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:23 vm03.local ceph-mon[50983]: pgmap v2616: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:24.433 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:24.434 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:24.486 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:24.486 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:25 vm00.local ceph-mon[49980]: pgmap v2617: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:25 vm03.local ceph-mon[50983]: pgmap v2617: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:27 vm00.local ceph-mon[49980]: pgmap v2618: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:27 vm03.local ceph-mon[50983]: pgmap v2618: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:28 vm00.local ceph-mon[49980]: pgmap v2619: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:28 vm03.local ceph-mon[50983]: pgmap v2619: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:29.487 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:29.488 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:29.515 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:29.516 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:31 vm00.local ceph-mon[49980]: pgmap v2620: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:31 vm03.local ceph-mon[50983]: pgmap v2620: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:33 vm00.local ceph-mon[49980]: pgmap v2621: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:33 vm03.local ceph-mon[50983]: pgmap v2621: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:34.517 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:34.518 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:34.544 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:34.545 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:35 vm00.local ceph-mon[49980]: pgmap v2622: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:35 vm03.local ceph-mon[50983]: pgmap v2622: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:42:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:42:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:42:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:42:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:37 vm00.local ceph-mon[49980]: pgmap v2623: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:37 vm03.local ceph-mon[50983]: pgmap v2623: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:38 vm00.local ceph-mon[49980]: pgmap v2624: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:38 vm03.local ceph-mon[50983]: pgmap v2624: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:39.546 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:39.547 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:39.575 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:39.575 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:41 vm00.local ceph-mon[49980]: pgmap v2625: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:41 vm03.local ceph-mon[50983]: pgmap v2625: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:43 vm00.local ceph-mon[49980]: pgmap v2626: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:43 vm03.local ceph-mon[50983]: pgmap v2626: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:44.576 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:44.577 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:44.602 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:44.603 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:45 vm00.local ceph-mon[49980]: pgmap v2627: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:45 vm03.local ceph-mon[50983]: pgmap v2627: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:47 vm00.local ceph-mon[49980]: pgmap v2628: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:47 vm03.local ceph-mon[50983]: pgmap v2628: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:48 vm00.local ceph-mon[49980]: pgmap v2629: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:48 vm03.local ceph-mon[50983]: pgmap v2629: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:49.604 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:49.605 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:49.631 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:49.632 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:51 vm00.local ceph-mon[49980]: pgmap v2630: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:51 vm03.local ceph-mon[50983]: pgmap v2630: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:53.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:53 vm00.local ceph-mon[49980]: pgmap v2631: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:53 vm03.local ceph-mon[50983]: pgmap v2631: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:54.633 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:54.634 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:54.662 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:54.663 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:42:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:55 vm00.local ceph-mon[49980]: pgmap v2632: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:55 vm03.local ceph-mon[50983]: pgmap v2632: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:57 vm00.local ceph-mon[49980]: pgmap v2633: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:57 vm03.local ceph-mon[50983]: pgmap v2633: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:42:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:42:58 vm00.local ceph-mon[49980]: pgmap v2634: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:42:58 vm03.local ceph-mon[50983]: pgmap v2634: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:42:59.664 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:42:59.664 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:42:59.691 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:42:59.691 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:43:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:01 vm00.local ceph-mon[49980]: pgmap v2635: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:01 vm03.local ceph-mon[50983]: pgmap v2635: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:03 vm00.local ceph-mon[49980]: pgmap v2636: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:03 vm03.local ceph-mon[50983]: pgmap v2636: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:04.692 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:43:04.693 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:43:04.718 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:43:04.719 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:43:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:05 vm00.local ceph-mon[49980]: pgmap v2637: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:05 vm03.local ceph-mon[50983]: pgmap v2637: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:07 vm00.local ceph-mon[49980]: pgmap v2638: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:07 vm03.local ceph-mon[50983]: pgmap v2638: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:08 vm00.local ceph-mon[49980]: pgmap v2639: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:08 vm03.local ceph-mon[50983]: pgmap v2639: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:09.720 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:43:09.721 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:43:09.748 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:43:09.748 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:43:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:11 vm00.local ceph-mon[49980]: pgmap v2640: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:11 vm03.local ceph-mon[50983]: pgmap v2640: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:13.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:13 vm00.local ceph-mon[49980]: pgmap v2641: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:13 vm03.local ceph-mon[50983]: pgmap v2641: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:14.750 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:43:14.750 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:43:14.776 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:43:14.776 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:43:15.451 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:15 vm00.local ceph-mon[49980]: pgmap v2642: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:15 vm03.local ceph-mon[50983]: pgmap v2642: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:43:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:43:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:43:16.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:43:16.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:43:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:43:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:43:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:43:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:43:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:43:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:17 vm00.local ceph-mon[49980]: pgmap v2643: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:17 vm03.local ceph-mon[50983]: pgmap v2643: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:18 vm00.local ceph-mon[49980]: pgmap v2644: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:18 vm03.local ceph-mon[50983]: pgmap v2644: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:19.778 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:43:19.778 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:43:19.804 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:43:19.804 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:43:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:21 vm00.local ceph-mon[49980]: pgmap v2645: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:21 vm03.local ceph-mon[50983]: pgmap v2645: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:23.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:23 vm00.local ceph-mon[49980]: pgmap v2646: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:23 vm03.local ceph-mon[50983]: pgmap v2646: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:24.805 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:43:24.806 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:43:24.831 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:43:24.832 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:43:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:25 vm00.local ceph-mon[49980]: pgmap v2647: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:25 vm03.local ceph-mon[50983]: pgmap v2647: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:43:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:27 vm00.local ceph-mon[49980]: pgmap v2648: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:43:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:27 vm03.local ceph-mon[50983]: pgmap v2648: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:28 vm00.local ceph-mon[49980]: pgmap v2649: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:28 vm03.local ceph-mon[50983]: pgmap v2649: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:29.833 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:43:29.834 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:43:29.859 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:43:29.860 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:43:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:31 vm00.local ceph-mon[49980]: pgmap v2650: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:31 vm03.local ceph-mon[50983]: pgmap v2650: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:33.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:33 vm00.local ceph-mon[49980]: pgmap v2651: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:33 vm03.local ceph-mon[50983]: pgmap v2651: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:34.861 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:43:34.862 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:43:34.889 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:43:34.889 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:43:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:35 vm00.local ceph-mon[49980]: pgmap v2652: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:35 vm03.local ceph-mon[50983]: pgmap v2652: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:43:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:43:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:36 vm03.local ceph-mon[50983]: from='mgr.14214 
192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:43:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:43:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:37 vm00.local ceph-mon[49980]: pgmap v2653: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:37 vm03.local ceph-mon[50983]: pgmap v2653: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:39 vm00.local ceph-mon[49980]: pgmap v2654: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:39 vm03.local ceph-mon[50983]: pgmap v2654: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:39.891 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:43:39.891 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:43:39.918 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:43:39.919 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:43:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:41 vm00.local ceph-mon[49980]: pgmap v2655: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:41 vm03.local ceph-mon[50983]: pgmap v2655: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:43 vm00.local ceph-mon[49980]: pgmap v2656: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:43 vm03.local ceph-mon[50983]: pgmap v2656: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:44.920 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:43:44.920 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:43:44.945 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:43:44.946 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:43:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:45 vm00.local ceph-mon[49980]: pgmap v2657: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:45 vm03.local ceph-mon[50983]: pgmap v2657: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:47.530 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:47 vm00.local ceph-mon[49980]: pgmap v2658: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:47 vm03.local ceph-mon[50983]: pgmap v2658: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:49.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:48 vm00.local ceph-mon[49980]: pgmap v2659: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:48 vm03.local ceph-mon[50983]: pgmap v2659: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:49.947 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:43:49.948 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:43:49.975 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:43:49.976 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:43:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:51 vm00.local ceph-mon[49980]: pgmap v2660: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:51 vm03.local ceph-mon[50983]: pgmap v2660: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:53 vm00.local ceph-mon[49980]: pgmap v2661: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:53 vm03.local ceph-mon[50983]: pgmap v2661: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:54.977 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:43:54.978 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:43:55.003 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:43:55.004 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:43:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:55 vm00.local ceph-mon[49980]: pgmap v2662: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:55 vm03.local ceph-mon[50983]: pgmap v2662: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:57 vm00.local ceph-mon[49980]: pgmap v2663: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:57 vm03.local ceph-mon[50983]: pgmap v2663: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:43:59.280 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:43:58 vm00.local ceph-mon[49980]: pgmap v2664: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:43:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:43:58 vm03.local ceph-mon[50983]: pgmap v2664: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:00.005 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:00.005 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:00.033 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:00.034 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:01 vm03.local ceph-mon[50983]: pgmap v2665: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:01 vm00.local ceph-mon[49980]: pgmap v2665: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:03.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:03 vm00.local ceph-mon[49980]: pgmap v2666: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:03 vm03.local ceph-mon[50983]: pgmap v2666: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:05.035 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:05.036 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:05.077 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:05.078 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:05 vm00.local ceph-mon[49980]: pgmap v2667: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:05 vm03.local ceph-mon[50983]: pgmap v2667: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:07.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:07 vm00.local ceph-mon[49980]: pgmap v2668: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:07 vm03.local ceph-mon[50983]: pgmap v2668: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:09.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:08 vm00.local ceph-mon[49980]: pgmap v2669: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:09.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:09 vm03.local ceph-mon[50983]: pgmap v2669: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:10.079 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:10.080 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:10.110 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:10.110 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:11 vm00.local ceph-mon[49980]: pgmap v2670: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:11 vm03.local ceph-mon[50983]: pgmap v2670: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:13 vm00.local ceph-mon[49980]: pgmap v2671: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:13 vm03.local ceph-mon[50983]: pgmap v2671: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:15.112 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:15.112 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:15.140 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:15.140 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:15.486 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:15 vm00.local ceph-mon[49980]: pgmap v2672: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:15.487 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:15 vm03.local ceph-mon[50983]: pgmap v2672: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:44:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:44:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:44:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:44:16.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:16 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:44:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:44:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:16 vm03.local ceph-mon[50983]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:44:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:44:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:44:16.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:16 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:44:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:17 vm00.local ceph-mon[49980]: pgmap v2673: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:17 vm03.local ceph-mon[50983]: pgmap v2673: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:19.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:19 vm00.local ceph-mon[49980]: pgmap v2674: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:19.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:19 vm03.local ceph-mon[50983]: pgmap v2674: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:20.142 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:20.142 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:20.168 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:20.168 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:21.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:21 vm00.local ceph-mon[49980]: pgmap v2675: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:21.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:21 vm03.local ceph-mon[50983]: pgmap v2675: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:23.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:23 vm00.local ceph-mon[49980]: pgmap v2676: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:23.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:23 vm03.local ceph-mon[50983]: pgmap v2676: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:25.170 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:25.170 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:25.196 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:25.196 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:25 vm00.local ceph-mon[49980]: pgmap v2677: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 
op/s 2026-03-10T06:44:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:25 vm03.local ceph-mon[50983]: pgmap v2677: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:27.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:27 vm00.local ceph-mon[49980]: pgmap v2678: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:27.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:27 vm03.local ceph-mon[50983]: pgmap v2678: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:29.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:29 vm00.local ceph-mon[49980]: pgmap v2679: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:29 vm03.local ceph-mon[50983]: pgmap v2679: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:30.198 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:30.198 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:30.224 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:30.224 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:31 vm00.local ceph-mon[49980]: pgmap v2680: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:31 vm03.local ceph-mon[50983]: pgmap v2680: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:33.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:33 vm00.local ceph-mon[49980]: pgmap v2681: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:33 vm03.local ceph-mon[50983]: pgmap v2681: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:35.226 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:35.226 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:35.254 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:35.254 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:35 vm00.local ceph-mon[49980]: pgmap v2682: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:35 vm03.local ceph-mon[50983]: pgmap v2682: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:44:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:44:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:44:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:44:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:37 vm00.local ceph-mon[49980]: pgmap v2683: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:37 vm03.local ceph-mon[50983]: pgmap v2683: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:39.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:39 vm00.local ceph-mon[49980]: pgmap v2684: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:39 vm03.local ceph-mon[50983]: pgmap v2684: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:44:40.256 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:40.256 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:40.282 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:40.283 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:44:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:41 vm00.local ceph-mon[49980]: pgmap v2685: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:41 vm03.local ceph-mon[50983]: pgmap v2685: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:43.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:43 vm00.local ceph-mon[49980]: pgmap v2686: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:43 vm03.local ceph-mon[50983]: pgmap v2686: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:44:45.284 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:44:45.285 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:44:45.312 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:44:45.313 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 
2026-03-10T06:44:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:44:45 vm00.local ceph-mon[49980]: pgmap v2687: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:44:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:44:45 vm03.local ceph-mon[50983]: pgmap v2687: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
[... the same cadence continues through 06:46:29: both mons report pgmap v2688-v2739 (97 pgs: 97 active+clean; totals unchanged) every ~2 s; the vm00 mount retry fails with "mount.nfs: mount system call failed" every 5 s from 06:44:50 through 06:46:25; the mgr config dump / config generate-minimal-conf / auth get trio recurs at 06:45:16 and 06:46:16, and the two mgr/rbd_support config rm dispatches recur at 06:45:36 ...]
2026-03-10T06:46:30.920 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:46:30.921 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:46:30.949 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:46:30.950 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:46:31.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:31 vm00.local ceph-mon[49980]: pgmap v2740: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:46:31.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:31 vm03.local ceph-mon[50983]: pgmap v2740: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:46:33.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:33 vm00.local ceph-mon[49980]: pgmap v2741: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:46:33.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:33 vm03.local ceph-mon[50983]: pgmap v2741: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:46:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:35 vm00.local ceph-mon[49980]: pgmap v2742: 97 pgs: 97 active+clean; 453 KiB data,
82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:35 vm03.local ceph-mon[50983]: pgmap v2742: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:35.952 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:46:35.952 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:46:35.982 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:46:35.983 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:46:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:46:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:46:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:46:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:46:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:37 vm00.local ceph-mon[49980]: pgmap v2743: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:37 vm03.local ceph-mon[50983]: pgmap v2743: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:39.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:39 vm03.local ceph-mon[50983]: pgmap v2744: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:39.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:39 vm00.local ceph-mon[49980]: pgmap v2744: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:40.984 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:46:40.985 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:46:41.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:46:41.012 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:46:41.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:41 vm00.local ceph-mon[49980]: pgmap v2745: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:41.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:41 vm03.local ceph-mon[50983]: pgmap v2745: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:43.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:43 vm00.local ceph-mon[49980]: pgmap v2746: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:43.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:43 vm03.local ceph-mon[50983]: pgmap v2746: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:45 vm00.local ceph-mon[49980]: pgmap v2747: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:45 vm03.local ceph-mon[50983]: pgmap v2747: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:46.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:46:46.013 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:46:46.041 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:46:46.041 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:46:47.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:47 vm00.local ceph-mon[49980]: pgmap v2748: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:47.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:47 vm03.local ceph-mon[50983]: pgmap v2748: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:49.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:49 vm03.local ceph-mon[50983]: pgmap v2749: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:49.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:49 vm00.local ceph-mon[49980]: pgmap v2749: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:51.043 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:46:51.043 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:46:51.070 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:46:51.070 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:46:51.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:51 vm00.local ceph-mon[49980]: pgmap v2750: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:51.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:51 vm03.local ceph-mon[50983]: pgmap v2750: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:53.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:53 vm00.local ceph-mon[49980]: pgmap v2751: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:53.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:53 vm03.local ceph-mon[50983]: pgmap v2751: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 
op/s 2026-03-10T06:46:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:55 vm00.local ceph-mon[49980]: pgmap v2752: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:55 vm03.local ceph-mon[50983]: pgmap v2752: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:56.072 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:46:56.072 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:46:56.099 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:46:56.100 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:46:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:57 vm00.local ceph-mon[49980]: pgmap v2753: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:57 vm03.local ceph-mon[50983]: pgmap v2753: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:46:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:46:59 vm03.local ceph-mon[50983]: pgmap v2754: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:46:59.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:46:59 vm00.local ceph-mon[49980]: pgmap v2754: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:47:01.101 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:47:01.101 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:47:01.134 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:47:01.134 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:47:01.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:47:01 vm00.local ceph-mon[49980]: pgmap v2755: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:47:01.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:47:01 vm03.local ceph-mon[50983]: pgmap v2755: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:47:03.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:47:03 vm00.local ceph-mon[49980]: pgmap v2756: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:47:03.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:47:03 vm03.local ceph-mon[50983]: pgmap v2756: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:47:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:47:05 vm00.local ceph-mon[49980]: pgmap v2757: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:47:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:47:05 vm03.local ceph-mon[50983]: pgmap v2757: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:47:06.136 
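[Editor's note: the `++ hostname` / `+ mount` / `+ sleep 5` lines above are `set -x` trace output from a shell retry loop of roughly the following shape. This is a sketch reconstructed from the trace, not the verbatim task script; in particular the deadline guard is an added assumption for illustration, while the traced loop appears to retry indefinitely.]

    #!/usr/bin/env bash
    # Sketch of the retry loop implied by the set -x trace above.
    # Assumption: the 600 s deadline is illustrative and not part of
    # the traced loop, which shows no timeout of its own.
    set -x
    deadline=$((SECONDS + 600))   # give up after ~10 minutes
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        if (( SECONDS >= deadline )); then
            echo "NFS export never became mountable" >&2
            exit 1
        fi
        sleep 5
    done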
2026-03-10T06:47:06.136 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:47:06.136 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:47:06.164 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:47:06.164 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
[The mount loop keeps failing the same way at 06:47:11, 06:47:16, 06:47:21, 06:47:26, 06:47:31, 06:47:36, 06:47:41, and 06:47:46.]
2026-03-10T06:47:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:47:15 vm00.local ceph-mon[49980]: pgmap v2762: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
[pgmap v2758 through v2779 continues every ~2 s on both mons; from v2762 onward the usage figure ticks up from 82 MiB to 83 MiB, everything else unchanged.]
[At 06:47:16-06:47:17 the per-minute mgr dispatch cycle repeats on both mons: config dump, config generate-minimal-conf, and auth get client.admin, followed by several bare from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' audit lines.]
[At 06:47:36 the mgr again dispatches config rm for mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule and mgr/rbd_support/vm00.vnepyw/trash_purge_schedule on both mons.]
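[Editor's note: to quantify how long the loop has been spinning, the failures can be counted straight out of the saved log. A minimal sketch follows; `teuthology.log` is a hypothetical name for a local copy of this output, and the timestamp extraction assumes the `2026-03-10T...` prefix seen on each line here.]

    # Count failed mount attempts and bracket the time window.
    # "teuthology.log" is a placeholder for a local copy of this log.
    grep -c 'mount.nfs: mount system call failed' teuthology.log
    grep 'mount system call failed' teuthology.log | head -n1 | cut -d' ' -f1   # first failure
    grep 'mount system call failed' teuthology.log | tail -n1 | cut -d' ' -f1   # latest failure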
[pgmap v2779 through v2796 continues every ~2 s on both mons, unchanged at 97 pgs active+clean, 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail.]
[The mount loop keeps failing at 06:47:51, 06:47:56, 06:48:01, 06:48:06, 06:48:11, 06:48:16, and 06:48:21.]
[At 06:48:17 the per-minute mgr dispatch cycle repeats on both mons: config dump, config generate-minimal-conf, and auth get client.admin, with the usual bare audit lines.]
2026-03-10T06:48:19.630 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:48:19 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
[ceph-mon[50983] on vm03 logs the same osd_memory_target config rm at 06:48:19.]
[pgmap v2797 through v2817 continues every ~2 s on both mons, still 97 pgs active+clean, 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail.]
[At 06:48:36 the mgr again dispatches config rm for mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule and mgr/rbd_support/vm00.vnepyw/trash_purge_schedule on both mons.]
[The mount loop keeps failing at 06:48:26, 06:48:31, 06:48:36, 06:48:41, 06:48:46, 06:48:51, 06:48:56, and 06:49:01.]
2026-03-10T06:49:07.008 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:07.009 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:07.035 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:07.035 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:07.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:07 vm00.local ceph-mon[49980]: pgmap v2818: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:07 vm03.local ceph-mon[50983]: pgmap v2818: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:09.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:09 vm00.local ceph-mon[49980]: pgmap v2819: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:09.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:09 vm03.local ceph-mon[50983]: pgmap v2819: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:11.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:11 vm00.local ceph-mon[49980]: pgmap v2820: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:11.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:11 vm03.local ceph-mon[50983]: pgmap v2820: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:12.037 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:12.037 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:12.065 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:12.065 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:13.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:13 vm00.local ceph-mon[49980]: pgmap v2821: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:13 vm03.local ceph-mon[50983]: pgmap v2821: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:15.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:15 vm00.local ceph-mon[49980]: pgmap v2822: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:15 vm03.local ceph-mon[50983]: pgmap v2822: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:17.067 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:17.067 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:17.093 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:17.093 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:17.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:17 vm00.local ceph-mon[49980]: pgmap v2823: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:17.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:17 vm03.local ceph-mon[50983]: pgmap v2823: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:19.266 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:19 vm03.local ceph-mon[50983]: pgmap v2824: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:19.266 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:19 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:49:19.266 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:19 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:49:19.266 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:19 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:49:19.455 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:19 vm00.local ceph-mon[49980]: pgmap v2824: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:19.455 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:19 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:49:19.455 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:19 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:49:19.455 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:19 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:49:20.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:20 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:20.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:20 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:20.570 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:20 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:20.570 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:20 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:21 vm00.local ceph-mon[49980]: pgmap v2825: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:21.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:49:21.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:21.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:21.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:21 vm03.local ceph-mon[50983]: pgmap v2825: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:21.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:21.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T06:49:21.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:21.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:49:22.095 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:22.096 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:22.126 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:22.126 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:23 vm00.local ceph-mon[49980]: pgmap v2826: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:23 vm03.local ceph-mon[50983]: pgmap v2826: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:25 vm00.local ceph-mon[49980]: pgmap v2827: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:25 vm03.local ceph-mon[50983]: pgmap v2827: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:27.128 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:27.128 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:27.156 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:27.157 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:27 vm00.local ceph-mon[49980]: pgmap v2828: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:27 vm03.local ceph-mon[50983]: pgmap v2828: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:29.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:29 vm00.local ceph-mon[49980]: pgmap v2829: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:29.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:29 vm03.local ceph-mon[50983]: pgmap v2829: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:31 vm00.local ceph-mon[49980]: pgmap v2830: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:31 vm03.local ceph-mon[50983]: pgmap v2830: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:32.158 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:32.159 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:32.186 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:32.186 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:33 vm00.local ceph-mon[49980]: pgmap v2831: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:33 vm03.local ceph-mon[50983]: pgmap v2831: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:35 vm00.local ceph-mon[49980]: pgmap v2832: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:49:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:49:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:35 vm03.local ceph-mon[50983]: pgmap v2832: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:49:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:49:37.188 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:37.189 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:37.214 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:37.215 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:37 vm00.local ceph-mon[49980]: pgmap v2833: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:37.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:37 vm03.local ceph-mon[50983]: pgmap v2833: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:39 vm00.local ceph-mon[49980]: pgmap v2834: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:39 vm03.local ceph-mon[50983]: pgmap v2834: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:41 vm00.local ceph-mon[49980]: pgmap v2835: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:41 vm03.local ceph-mon[50983]: pgmap v2835: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:42.216 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:42.217 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:42.243 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:42.243 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:43.815 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:43 vm00.local ceph-mon[49980]: pgmap v2836: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:43 vm03.local ceph-mon[50983]: pgmap v2836: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:46.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:45 vm00.local ceph-mon[49980]: pgmap v2837: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:46.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:45 vm03.local ceph-mon[50983]: pgmap v2837: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:47.245 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:47.246 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:47.274 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:47.274 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:47 vm00.local ceph-mon[49980]: pgmap v2838: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:47 vm03.local ceph-mon[50983]: pgmap v2838: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:49.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:49 vm00.local ceph-mon[49980]: pgmap v2839: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:49 vm03.local ceph-mon[50983]: pgmap v2839: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:51 vm00.local ceph-mon[49980]: pgmap v2840: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:51 vm03.local ceph-mon[50983]: pgmap v2840: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:52.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:52.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:52.305 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:52.305 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:53 vm00.local ceph-mon[49980]: pgmap v2841: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:53 vm03.local ceph-mon[50983]: pgmap v2841: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:55 vm00.local ceph-mon[49980]: pgmap v2842: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:55 vm03.local ceph-mon[50983]: pgmap v2842: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:57.307 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:49:57.308 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:49:57.335 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:49:57.336 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:49:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:57 vm00.local ceph-mon[49980]: pgmap v2843: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:57 vm03.local ceph-mon[50983]: pgmap v2843: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:49:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:49:59 vm00.local ceph-mon[49980]: pgmap v2844: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:49:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:49:59 vm03.local ceph-mon[50983]: pgmap v2844: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:00.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:00 vm00.local ceph-mon[49980]: overall HEALTH_OK
2026-03-10T06:50:00.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:00 vm03.local ceph-mon[50983]: overall HEALTH_OK
2026-03-10T06:50:01.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:01 vm00.local ceph-mon[49980]: pgmap v2845: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:01 vm03.local ceph-mon[50983]: pgmap v2845: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:02.337 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:02.337 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:02.364 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:02.364 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:03 vm00.local ceph-mon[49980]: pgmap v2846: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:03 vm03.local ceph-mon[50983]: pgmap v2846: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:05 vm00.local ceph-mon[49980]: pgmap v2847: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:05 vm03.local ceph-mon[50983]: pgmap v2847: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:07.366 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:07.366 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:07.393 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:07.394 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:07 vm00.local ceph-mon[49980]: pgmap v2848: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:07 vm03.local ceph-mon[50983]: pgmap v2848: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:09.619 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:09 vm03.local ceph-mon[50983]: pgmap v2849: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:09 vm00.local ceph-mon[49980]: pgmap v2849: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:11 vm00.local ceph-mon[49980]: pgmap v2850: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:11 vm03.local ceph-mon[50983]: pgmap v2850: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:12.395 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:12.395 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:12.423 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:12.423 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:13.688 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:13 vm00.local ceph-mon[49980]: pgmap v2851: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:13 vm03.local ceph-mon[50983]: pgmap v2851: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:15 vm00.local ceph-mon[49980]: pgmap v2852: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:15 vm03.local ceph-mon[50983]: pgmap v2852: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:17.425 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:17.425 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:17.453 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:17.453 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:17 vm00.local ceph-mon[49980]: pgmap v2853: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:17 vm03.local ceph-mon[50983]: pgmap v2853: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:19 vm00.local ceph-mon[49980]: pgmap v2854: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:19 vm03.local ceph-mon[50983]: pgmap v2854: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:21 vm00.local ceph-mon[49980]: pgmap v2855: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:50:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:50:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:50:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:50:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:50:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:50:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:21 vm03.local ceph-mon[50983]: pgmap v2855: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:50:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:50:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:50:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:50:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:50:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:50:22.455 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:22.455 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:22.481 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:22.482 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:23.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:23 vm00.local ceph-mon[49980]: pgmap v2856: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:50:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:23 vm03.local ceph-mon[50983]: pgmap v2856: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:50:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:25 vm00.local ceph-mon[49980]: pgmap v2857: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:25 vm03.local ceph-mon[50983]: pgmap v2857: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:27.483 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:27.484 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:27.510 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:27.511 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:27.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:27 vm00.local ceph-mon[49980]: pgmap v2858: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:50:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:27 vm03.local ceph-mon[50983]: pgmap v2858: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s
2026-03-10T06:50:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:29 vm00.local ceph-mon[49980]: pgmap v2859: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:29 vm03.local ceph-mon[50983]: pgmap v2859: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:31.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:31 vm00.local ceph-mon[49980]: pgmap v2860: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:31 vm03.local ceph-mon[50983]: pgmap v2860: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:32.512 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:32.513 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:32.539 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:32.540 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:33.956 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:33 vm00.local ceph-mon[49980]: pgmap v2861: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:33 vm03.local ceph-mon[50983]: pgmap v2861: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:35 vm00.local ceph-mon[49980]: pgmap v2862: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:50:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:50:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:35 vm03.local ceph-mon[50983]: pgmap v2862: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:50:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:50:37.541 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:37.542 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:37.569 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:37.569 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:38.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:37 vm00.local ceph-mon[49980]: pgmap v2863: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:38.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:37 vm03.local ceph-mon[50983]: pgmap v2863: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:39 vm00.local ceph-mon[49980]: pgmap v2864: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:39 vm03.local ceph-mon[50983]: pgmap v2864: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:41.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:40 vm00.local ceph-mon[49980]: pgmap v2865: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:41.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:40 vm03.local ceph-mon[50983]: pgmap v2865: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:42.571 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:42.571 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:42.609 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:42.610 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:43 vm00.local ceph-mon[49980]: pgmap v2866: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:43 vm03.local ceph-mon[50983]: pgmap v2866: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:45 vm00.local ceph-mon[49980]: pgmap v2867: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:45 vm03.local ceph-mon[50983]: pgmap v2867: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:47.612 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:47.612 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:47.637 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:47.637 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:47 vm00.local ceph-mon[49980]: pgmap v2868: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:47 vm03.local ceph-mon[50983]: pgmap v2868: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:49.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:49 vm00.local ceph-mon[49980]: pgmap v2869: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:49 vm03.local ceph-mon[50983]: pgmap v2869: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:51.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:51 vm00.local ceph-mon[49980]: pgmap v2870: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:51 vm03.local ceph-mon[50983]: pgmap v2870: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:52.639 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:52.640 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:52.666 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:52.667 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:53 vm00.local ceph-mon[49980]: pgmap v2871: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:53 vm03.local ceph-mon[50983]: pgmap v2871: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:55 vm00.local ceph-mon[49980]: pgmap v2872: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:55 vm03.local ceph-mon[50983]: pgmap v2872: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:50:57.668 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:50:57.669 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:50:57.695 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:50:57.695 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:50:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:57 vm00.local ceph-mon[49980]: pgmap v2873: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:57 vm03.local ceph-mon[50983]: pgmap v2873: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:50:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:50:59 vm03.local ceph-mon[50983]: pgmap v2874: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:50:59 vm00.local ceph-mon[49980]: pgmap v2874: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:01 vm00.local ceph-mon[49980]: pgmap v2875: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:01 vm03.local ceph-mon[50983]: pgmap v2875: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:02.697 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:02.698 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:02.727 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:02.728 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:02 vm00.local ceph-mon[49980]: pgmap v2876: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:02 vm03.local ceph-mon[50983]: pgmap v2876: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:05 vm00.local ceph-mon[49980]: pgmap v2877: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:05 vm03.local ceph-mon[50983]: pgmap v2877: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:07.729 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:07.731 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:07.756 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:07.757 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:07 vm00.local ceph-mon[49980]: pgmap v2878: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:07 vm03.local ceph-mon[50983]: pgmap v2878: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:09.608 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:09 vm03.local ceph-mon[50983]: pgmap v2879: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:09 vm00.local ceph-mon[49980]: pgmap v2879: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:11 vm00.local ceph-mon[49980]: pgmap v2880: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:11 vm03.local ceph-mon[50983]: pgmap v2880: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:12.758 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:12.759 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:12.786 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:12.786 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:13.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:13 vm00.local ceph-mon[49980]: pgmap v2881: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:13 vm03.local ceph-mon[50983]: pgmap v2881: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:15 vm00.local ceph-mon[49980]: pgmap v2882: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:15 vm03.local ceph-mon[50983]: pgmap v2882: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:17 vm00.local ceph-mon[49980]: pgmap v2883: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:17.787 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:17.788 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:17 vm03.local ceph-mon[50983]: pgmap v2883: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:17.815 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:17.815 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:19.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:19 vm00.local ceph-mon[49980]: pgmap v2884: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:19 vm03.local ceph-mon[50983]: pgmap v2884: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:21 vm00.local ceph-mon[49980]: pgmap v2885: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:51:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:51:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:51:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:51:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:51:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:21 vm03.local ceph-mon[50983]: pgmap v2885: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:51:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:51:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:51:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:51:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:51:22.817 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:22.817 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:22.843 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:22.844 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:23 vm03.local ceph-mon[50983]: pgmap v2886: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:23.960 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:23 vm00.local ceph-mon[49980]: pgmap v2886: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:25 vm03.local ceph-mon[50983]: pgmap v2887: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:26.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:25 vm00.local ceph-mon[49980]: pgmap v2887: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:27 vm03.local ceph-mon[50983]: pgmap v2888: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:27.845 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:27.845 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:27.874 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:27.874 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:27 vm00.local ceph-mon[49980]: pgmap v2888: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:29 vm03.local ceph-mon[50983]: pgmap v2889: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:29 vm00.local ceph-mon[49980]: pgmap v2889: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:31 vm03.local ceph-mon[50983]: pgmap v2890: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:31 vm00.local ceph-mon[49980]: pgmap v2890: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:32.876 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:32.876 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:32.903 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:32.903 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:33.960 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:33 vm00.local ceph-mon[49980]: pgmap v2891: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:33 vm03.local ceph-mon[50983]: pgmap v2891: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:35 vm00.local ceph-mon[49980]: pgmap v2892: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:51:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:51:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:35 vm03.local ceph-mon[50983]: pgmap v2892: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:51:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:51:37.905 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:37.905 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:37.961 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:37.962 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:38.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:37 vm00.local ceph-mon[49980]: pgmap v2893: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:38.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:37 vm03.local ceph-mon[50983]: pgmap v2893: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:39 vm03.local ceph-mon[50983]: pgmap v2894: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:39 vm00.local ceph-mon[49980]: pgmap v2894: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:41 vm00.local ceph-mon[49980]: pgmap v2895: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:41 vm03.local ceph-mon[50983]: pgmap v2895: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:42.965 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:42.965 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:42.993 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:42.993 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:43.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:43 vm00.local ceph-mon[49980]: pgmap v2896: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:43 vm03.local ceph-mon[50983]: pgmap v2896: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:46.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:45 vm00.local ceph-mon[49980]: pgmap v2897: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:46.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:45 vm03.local ceph-mon[50983]: pgmap v2897: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:47.994 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:47.995 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:48.021 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:48.022 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:47 vm00.local ceph-mon[49980]: pgmap v2898: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:47 vm03.local ceph-mon[50983]: pgmap v2898: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:49 vm03.local ceph-mon[50983]: pgmap v2899: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:49 vm00.local ceph-mon[49980]: pgmap v2899: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:51 vm00.local ceph-mon[49980]: pgmap v2900: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:51 vm03.local ceph-mon[50983]: pgmap v2900: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:53.023 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:53.024 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:53.050 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:53.050 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:54.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:54 vm00.local ceph-mon[49980]: pgmap v2901: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:54.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:54 vm03.local ceph-mon[50983]: pgmap v2901: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:55 vm00.local ceph-mon[49980]: pgmap v2902: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:55 vm03.local ceph-mon[50983]: pgmap v2902: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:51:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:57 vm00.local ceph-mon[49980]: pgmap v2903: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:58.052 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:51:58.052 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:51:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:57 vm03.local ceph-mon[50983]: pgmap v2903: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:51:58.078 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:51:58.079 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:51:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:51:59 vm03.local ceph-mon[50983]: pgmap v2904: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:51:59 vm00.local ceph-mon[49980]: pgmap v2904: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:01 vm00.local ceph-mon[49980]: pgmap v2905: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:01 vm03.local ceph-mon[50983]: pgmap v2905: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:03.080 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:03.081 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:03.106 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:03.106 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:03.963 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:03 vm00.local ceph-mon[49980]: pgmap v2906: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:03 vm03.local ceph-mon[50983]: pgmap v2906: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:04 vm00.local ceph-mon[49980]: pgmap v2907: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:04 vm03.local ceph-mon[50983]: pgmap v2907: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:07.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:07 vm03.local ceph-mon[50983]: pgmap v2908: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:07 vm00.local ceph-mon[49980]: pgmap v2908: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:08.108 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:08.108 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:08.134 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:08.134 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:09 vm03.local ceph-mon[50983]: pgmap v2909: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:09 vm00.local ceph-mon[49980]: pgmap v2909: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:11 vm00.local ceph-mon[49980]: pgmap v2910: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:11 vm03.local ceph-mon[50983]: pgmap v2910: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:13.135 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:13.136 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:13.162 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:13.162 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:13.963 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:13 vm00.local ceph-mon[49980]: pgmap v2911: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:13 vm03.local ceph-mon[50983]: pgmap v2911: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:15 vm00.local ceph-mon[49980]: pgmap v2912: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:15 vm03.local ceph-mon[50983]: pgmap v2912: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:17 vm00.local ceph-mon[49980]: pgmap v2913: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:17 vm03.local ceph-mon[50983]: pgmap v2913: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:18.163 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:18.164 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:18.190 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:18.190 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:19 vm03.local ceph-mon[50983]: pgmap v2914: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:19 vm00.local ceph-mon[49980]: pgmap v2914: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:21 vm00.local ceph-mon[49980]: pgmap v2915: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:52:22.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:52:22.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:52:22.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:52:22.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:52:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:21 vm03.local ceph-mon[50983]: pgmap v2915: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:52:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:52:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:52:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:52:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:52:23.191 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:23.192 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:23.218 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:23.219 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:23.964 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:23 vm00.local ceph-mon[49980]: pgmap v2916: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:23 vm03.local ceph-mon[50983]: pgmap v2916: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:26.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:25 vm00.local ceph-mon[49980]: pgmap v2917: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:25 vm03.local ceph-mon[50983]: pgmap v2917: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:27 vm00.local ceph-mon[49980]: pgmap v2918: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:27 vm03.local ceph-mon[50983]: pgmap v2918: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:28.220 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:28.220 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:28.246 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:28.247 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:29 vm00.local ceph-mon[49980]: pgmap v2919: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:30.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:29 vm03.local ceph-mon[50983]: pgmap v2919: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:31 vm00.local ceph-mon[49980]: pgmap v2920: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:31 vm03.local ceph-mon[50983]: pgmap v2920: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:32 vm00.local ceph-mon[49980]: pgmap v2921: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:33.249 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:33.249 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:33.275 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:33.275 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:32 vm03.local ceph-mon[50983]: pgmap v2921: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:35 vm00.local ceph-mon[49980]: pgmap v2922: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:52:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:52:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:35 vm03.local ceph-mon[50983]: pgmap v2922: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:52:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:52:38.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:37 vm00.local ceph-mon[49980]: pgmap v2923: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:38.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:37 vm03.local ceph-mon[50983]: pgmap v2923: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:38.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:38.277 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:38.302 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:38.302 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:39 vm03.local ceph-mon[50983]: pgmap v2924: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:39 vm00.local ceph-mon[49980]: pgmap v2924: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:41 vm00.local ceph-mon[49980]: pgmap v2925: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:41 vm03.local ceph-mon[50983]: pgmap v2925: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:43.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:42 vm00.local ceph-mon[49980]: pgmap v2926: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:43.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:42 vm03.local ceph-mon[50983]: pgmap v2926: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:43.303 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:43.304 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:43.330 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:43.330 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:46.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:45 vm00.local ceph-mon[49980]: pgmap v2927: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:46.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:45 vm03.local ceph-mon[50983]: pgmap v2927: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:47 vm00.local ceph-mon[49980]: pgmap v2928: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:47 vm03.local ceph-mon[50983]: pgmap v2928: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:48.332 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:48.332 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:48.360 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:48.361 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:49 vm00.local ceph-mon[49980]: pgmap v2929: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:49 vm03.local ceph-mon[50983]: pgmap v2929: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:51 vm00.local ceph-mon[49980]: pgmap v2930: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:51 vm03.local ceph-mon[50983]: pgmap v2930: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:53.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:52 vm00.local ceph-mon[49980]: pgmap v2931: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:53.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:52 vm03.local ceph-mon[50983]: pgmap v2931: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:53.362 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:53.363 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:53.517 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:53.518 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:52:56.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:55 vm00.local ceph-mon[49980]: pgmap v2932: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:56.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:55 vm03.local ceph-mon[50983]: pgmap v2932: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:52:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:57 vm00.local ceph-mon[49980]: pgmap v2933: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:57 vm03.local ceph-mon[50983]: pgmap v2933: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:52:58.520 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:52:58.520 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:52:58.634 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:52:58.635 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:52:59 vm00.local ceph-mon[49980]: pgmap v2934: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:52:59 vm03.local ceph-mon[50983]: pgmap v2934: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:01 vm00.local ceph-mon[49980]: pgmap v2935: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:01 vm03.local ceph-mon[50983]: pgmap v2935: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:03.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:02 vm00.local ceph-mon[49980]: pgmap v2936: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:03.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:02 vm03.local ceph-mon[50983]: pgmap v2936: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:03.637 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:03.637 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:03.664 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:03.664 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:06.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:05 vm00.local ceph-mon[49980]: pgmap v2937: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:06.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:05 vm03.local ceph-mon[50983]: pgmap v2937: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:07 vm00.local ceph-mon[49980]: pgmap v2938: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:07 vm03.local ceph-mon[50983]: pgmap v2938: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:08.665 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:08.666 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:08.729 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:08.730 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:10.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:09 vm00.local ceph-mon[49980]: pgmap v2939: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:10.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:09 vm03.local ceph-mon[50983]: pgmap v2939: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:11.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:10 vm03.local ceph-mon[50983]: pgmap v2940: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:11.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:10 vm00.local ceph-mon[49980]: pgmap v2940: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:13 vm00.local ceph-mon[49980]: pgmap v2941: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:13.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:13 vm03.local ceph-mon[50983]: pgmap v2941: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:13.731 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:13.732 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:13.758 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:13.759 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:15 vm00.local ceph-mon[49980]: pgmap v2942: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:15 vm03.local ceph-mon[50983]: pgmap v2942: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:17 vm00.local ceph-mon[49980]: pgmap v2943: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:17 vm03.local ceph-mon[50983]: pgmap v2943: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:18.760 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:18.761 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:18.788 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:18.788 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:19 vm00.local ceph-mon[49980]: pgmap v2944: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:19 vm03.local ceph-mon[50983]: pgmap v2944: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:21 vm00.local ceph-mon[49980]: pgmap v2945: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:53:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:53:21.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:53:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:21 vm03.local ceph-mon[50983]: pgmap v2945: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:53:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:53:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:53:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:53:23.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:53:23.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:22 vm00.local ceph-mon[49980]: pgmap v2946: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:53:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:53:23.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:22 vm03.local ceph-mon[50983]: pgmap v2946: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:23.790 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:23.790 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:23.816 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:23.817 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:26.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:25 vm00.local ceph-mon[49980]: pgmap v2947: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:25 vm03.local ceph-mon[50983]: pgmap v2947: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:27 vm00.local ceph-mon[49980]: pgmap v2948: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:27 vm03.local ceph-mon[50983]: pgmap v2948: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:28.818 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:28.819 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:28.846 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:28.847 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:29 vm00.local ceph-mon[49980]: pgmap v2949: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:30.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:29 vm03.local ceph-mon[50983]: pgmap v2949: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:31 vm00.local ceph-mon[49980]: pgmap v2950: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:31 vm03.local ceph-mon[50983]: pgmap v2950: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:33.848 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:33.849 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:33.876 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:33.876 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:33.970 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:33 vm00.local ceph-mon[49980]: pgmap v2951: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:33 vm03.local ceph-mon[50983]: pgmap v2951: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:35 vm00.local ceph-mon[49980]: pgmap v2952: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:53:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:53:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:35 vm03.local ceph-mon[50983]: pgmap v2952: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:53:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:53:38.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:37 vm00.local ceph-mon[49980]: pgmap v2953: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:38.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:37 vm03.local ceph-mon[50983]: pgmap v2953: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:38.878 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:38.878 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:38.904 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:38.905 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:39 vm00.local ceph-mon[49980]: pgmap v2954: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:39 vm03.local ceph-mon[50983]: pgmap v2954: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:41 vm00.local ceph-mon[49980]: pgmap v2955: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:41 vm03.local ceph-mon[50983]: pgmap v2955: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:43.906 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:43.907 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:43.932 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:43.933 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:43.971 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:43 vm00.local ceph-mon[49980]: pgmap v2956: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:43 vm03.local ceph-mon[50983]: pgmap v2956: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:46.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:45 vm00.local ceph-mon[49980]: pgmap v2957: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:46.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:45 vm03.local ceph-mon[50983]: pgmap v2957: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:47 vm00.local ceph-mon[49980]: pgmap v2958: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:47 vm03.local ceph-mon[50983]: pgmap v2958: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:48.934 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:48.935 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:48.961 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:48.962 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:49 vm00.local ceph-mon[49980]: pgmap v2959: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:49 vm03.local ceph-mon[50983]: pgmap v2959: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:51 vm00.local ceph-mon[49980]: pgmap v2960: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:51 vm03.local ceph-mon[50983]: pgmap v2960: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:53.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:52 vm00.local ceph-mon[49980]: pgmap v2961: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:52 vm03.local ceph-mon[50983]: pgmap v2961: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:53.963 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:53.963 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:53.989 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:53.990 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:53:56.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:55 vm00.local ceph-mon[49980]: pgmap v2962: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:56.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:55 vm03.local ceph-mon[50983]: pgmap v2962: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:53:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:57 vm00.local ceph-mon[49980]: pgmap v2963: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:57 vm03.local ceph-mon[50983]: pgmap v2963: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:53:58.991 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:53:58.992 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:53:59.017 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:53:59.017 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:53:59 vm00.local ceph-mon[49980]: pgmap v2964: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:53:59 vm03.local ceph-mon[50983]: pgmap v2964: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:01 vm00.local ceph-mon[49980]: pgmap v2965: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:01 vm03.local ceph-mon[50983]: pgmap v2965: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:03.974 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:03 vm00.local ceph-mon[49980]: pgmap v2966: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:04.018 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:04.019 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:04.046 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:04.046 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:03 vm03.local ceph-mon[50983]: pgmap v2966: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:06.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:05 vm00.local ceph-mon[49980]: pgmap v2967: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:06.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:05 vm03.local ceph-mon[50983]: pgmap v2967: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:07 vm00.local ceph-mon[49980]: pgmap v2968: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:07 vm03.local ceph-mon[50983]: pgmap v2968: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:09.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:08 vm00.local ceph-mon[49980]: pgmap v2969: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:09.048 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:09.049 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:09.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:08 vm03.local ceph-mon[50983]: pgmap v2969: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:09.077 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:09.077 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:11.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:11 vm00.local ceph-mon[49980]: pgmap v2970: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:11 vm03.local ceph-mon[50983]: pgmap v2970: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:13.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:13 vm00.local ceph-mon[49980]: pgmap v2971: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:13 vm03.local ceph-mon[50983]: pgmap v2971: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:14.079 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:14.079 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:14.171 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:14.171 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:16.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:15 vm00.local ceph-mon[49980]: pgmap v2972: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:16.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:15 vm03.local ceph-mon[50983]: pgmap v2972: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:17.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:16 vm00.local ceph-mon[49980]: pgmap v2973: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:17.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:16 vm03.local ceph-mon[50983]: pgmap v2973: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:19.172 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:19.173 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:19.198 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:19.198 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:19 vm00.local ceph-mon[49980]: pgmap v2974: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:19 vm03.local ceph-mon[50983]: pgmap v2974: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:21.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:20 vm00.local ceph-mon[49980]: pgmap v2975: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 167 B/s wr, 0 op/s
2026-03-10T06:54:21.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:20 vm03.local ceph-mon[50983]: pgmap v2975: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 167 B/s wr, 0 op/s
2026-03-10T06:54:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:54:22.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:54:22.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:21 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:54:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:54:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:54:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:21 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:54:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:54:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:54:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:22 vm00.local ceph-mon[49980]: pgmap v2976: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 251 B/s wr, 0 op/s
2026-03-10T06:54:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:54:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:54:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:22 vm03.local ceph-mon[50983]: pgmap v2976: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 251 B/s wr, 0 op/s
2026-03-10T06:54:24.199 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:24.200 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:24.225 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:24.226 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:26.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:25 vm00.local ceph-mon[49980]: pgmap v2977: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 251 B/s wr, 0 op/s
2026-03-10T06:54:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:25 vm03.local ceph-mon[50983]: pgmap v2977: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 251 B/s wr, 0 op/s
2026-03-10T06:54:27.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:26 vm00.local ceph-mon[49980]: pgmap v2978: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 167 B/s wr, 0 op/s
2026-03-10T06:54:27.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:26 vm03.local ceph-mon[50983]: pgmap v2978: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 167 B/s wr, 0 op/s
2026-03-10T06:54:29.227 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:29.228 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:29.258 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:29.258 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:29 vm00.local ceph-mon[49980]: pgmap v2979: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 335 B/s rd, 251 B/s wr, 0 op/s
2026-03-10T06:54:30.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:29 vm03.local ceph-mon[50983]: pgmap v2979: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 335 B/s rd, 251 B/s wr, 0 op/s
2026-03-10T06:54:31.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:30 vm00.local ceph-mon[49980]: pgmap v2980: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 167 B/s wr, 0 op/s
2026-03-10T06:54:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:30 vm03.local ceph-mon[50983]: pgmap v2980: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 167 B/s rd, 167 B/s wr, 0 op/s
2026-03-10T06:54:33.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:33 vm00.local ceph-mon[49980]: pgmap v2981: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:33.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:33 vm03.local ceph-mon[50983]: pgmap v2981: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:34.260 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:34.260 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:34.288 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:34.289 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:34 vm00.local ceph-mon[49980]: pgmap v2982: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:34 vm03.local ceph-mon[50983]: pgmap v2982: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:54:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:54:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:54:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:54:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:36 vm00.local ceph-mon[49980]: pgmap v2983: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:36 vm03.local ceph-mon[50983]: pgmap v2983: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:39.291 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:54:39.291 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:54:39.318 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:54:39.319 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:54:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:39 vm00.local ceph-mon[49980]: pgmap v2984: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:39 vm03.local ceph-mon[50983]: pgmap v2984: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:54:41.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:40 vm00.local ceph-mon[49980]: pgmap v2985: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:54:41.056
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:40 vm03.local ceph-mon[50983]: pgmap v2985: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:43.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:43 vm00.local ceph-mon[49980]: pgmap v2986: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:54:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:43 vm03.local ceph-mon[50983]: pgmap v2986: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:54:44.320 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:54:44.320 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:54:44.347 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:54:44.347 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:54:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:44 vm00.local ceph-mon[49980]: pgmap v2987: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:44 vm03.local ceph-mon[50983]: pgmap v2987: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:47.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:47 vm00.local ceph-mon[49980]: pgmap v2988: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:47 vm03.local ceph-mon[50983]: pgmap v2988: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:49.349 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:54:49.349 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:54:49.375 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:54:49.376 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:54:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:49 vm00.local ceph-mon[49980]: pgmap v2989: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:54:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:49 vm03.local ceph-mon[50983]: pgmap v2989: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:54:51.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:50 vm00.local ceph-mon[49980]: pgmap v2990: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:50 vm03.local ceph-mon[50983]: pgmap v2990: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:53.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:53 vm00.local ceph-mon[49980]: pgmap v2991: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:54:53.806 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:53 vm03.local ceph-mon[50983]: pgmap v2991: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:54:54.377 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:54:54.378 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:54:54.406 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:54:54.406 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:54:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:54 vm00.local ceph-mon[49980]: pgmap v2992: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:54 vm03.local ceph-mon[50983]: pgmap v2992: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:57.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:57 vm00.local ceph-mon[49980]: pgmap v2993: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:57 vm03.local ceph-mon[50983]: pgmap v2993: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:54:59.408 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:54:59.409 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:54:59.438 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:54:59.438 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:55:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:54:59 vm00.local ceph-mon[49980]: pgmap v2994: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:55:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:54:59 vm03.local ceph-mon[50983]: pgmap v2994: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:55:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:00 vm00.local ceph-mon[49980]: pgmap v2995: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:55:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:00 vm03.local ceph-mon[50983]: pgmap v2995: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:55:03.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:03 vm00.local ceph-mon[49980]: pgmap v2996: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:55:03.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:03 vm03.local ceph-mon[50983]: pgmap v2996: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:55:04.440 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:55:04.440 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:55:04.468 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-10T06:55:04.468 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:04 vm00.local ceph-mon[49980]: pgmap v2997: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:04 vm03.local ceph-mon[50983]: pgmap v2997: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:07.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:07 vm00.local ceph-mon[49980]: pgmap v2998: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:07 vm03.local ceph-mon[50983]: pgmap v2998: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:09.469 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:09.470 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:09.497 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:09.497 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:09.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:09 vm00.local ceph-mon[49980]: pgmap v2999: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:09 vm03.local ceph-mon[50983]: pgmap v2999: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:11.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:11 vm03.local ceph-mon[50983]: pgmap v3000: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:11 vm00.local ceph-mon[49980]: pgmap v3000: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:13.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:13 vm03.local ceph-mon[50983]: pgmap v3001: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:13.980 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:13 vm00.local ceph-mon[49980]: pgmap v3001: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:14.499 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:14.499 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:14.625 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:14.625 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:14 vm00.local ceph-mon[49980]: pgmap v3002: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:14 vm03.local ceph-mon[50983]: pgmap v3002: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:17.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:17 vm00.local ceph-mon[49980]: pgmap v3003: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:17 vm03.local ceph-mon[50983]: pgmap v3003: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:19.627 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:19.628 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:19.658 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:19.659 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:19 vm03.local ceph-mon[50983]: pgmap v3004: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:19 vm00.local ceph-mon[49980]: pgmap v3004: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:21 vm03.local ceph-mon[50983]: pgmap v3005: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:22.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:21 vm00.local ceph-mon[49980]: pgmap v3005: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:55:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:55:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:55:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:55:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:55:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:55:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:55:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:55:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:55:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:55:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:23 vm03.local ceph-mon[50983]: pgmap v3006: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:23.980 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:23 vm00.local ceph-mon[49980]: pgmap v3006: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:24.660 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:24.660 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:24.690 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:24.690 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:24 vm00.local ceph-mon[49980]: pgmap v3007: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:24 vm03.local ceph-mon[50983]: pgmap v3007: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:27 vm03.local ceph-mon[50983]: pgmap v3008: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:27 vm00.local ceph-mon[49980]: pgmap v3008: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:29.692 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:29.692 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:29.719 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:29.719 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:29 vm03.local ceph-mon[50983]: pgmap v3009: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:29 vm00.local ceph-mon[49980]: pgmap v3009: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:31 vm00.local ceph-mon[49980]: pgmap v3010: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:31 vm03.local ceph-mon[50983]: pgmap v3010: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:33.981 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:33 vm00.local ceph-mon[49980]: pgmap v3011: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:33 vm03.local ceph-mon[50983]: pgmap v3011: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:34.721 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:34.721 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:34.747 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:34.748 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:34 vm00.local ceph-mon[49980]: pgmap v3012: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:34 vm03.local ceph-mon[50983]: pgmap v3012: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:55:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:55:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:55:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:55:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:36 vm00.local ceph-mon[49980]: pgmap v3013: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:36 vm03.local ceph-mon[50983]: pgmap v3013: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:39.749 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:39.750 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:39.758 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:39 vm03.local ceph-mon[50983]: pgmap v3014: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:39.776 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:39.777 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:39 vm00.local ceph-mon[49980]: pgmap v3014: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:41.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:41 vm03.local ceph-mon[50983]: pgmap v3015: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:41 vm00.local ceph-mon[49980]: pgmap v3015: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:43.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:43 vm03.local ceph-mon[50983]: pgmap v3016: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:43.982 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:43 vm00.local ceph-mon[49980]: pgmap v3016: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:44.778 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:44.779 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:44.804 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:44.805 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:44 vm00.local ceph-mon[49980]: pgmap v3017: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:44 vm03.local ceph-mon[50983]: pgmap v3017: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:47 vm03.local ceph-mon[50983]: pgmap v3018: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:47 vm00.local ceph-mon[49980]: pgmap v3018: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:49 vm03.local ceph-mon[50983]: pgmap v3019: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:49.806 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:49.807 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:49.833 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:49.834 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:49 vm00.local ceph-mon[49980]: pgmap v3019: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:51 vm03.local ceph-mon[50983]: pgmap v3020: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:51 vm00.local ceph-mon[49980]: pgmap v3020: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:53.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:53 vm03.local ceph-mon[50983]: pgmap v3021: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:53.982 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:53 vm00.local ceph-mon[49980]: pgmap v3021: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:54.835 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:54.836 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:54.862 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:54.862 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:55:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:54 vm00.local ceph-mon[49980]: pgmap v3022: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:54 vm03.local ceph-mon[50983]: pgmap v3022: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:57 vm03.local ceph-mon[50983]: pgmap v3023: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:57 vm00.local ceph-mon[49980]: pgmap v3023: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:55:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:55:59 vm03.local ceph-mon[50983]: pgmap v3024: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:55:59.863 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:55:59.864 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:55:59.891 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:55:59.892 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:55:59 vm00.local ceph-mon[49980]: pgmap v3024: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:02.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:01 vm00.local ceph-mon[49980]: pgmap v3025: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:02.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:01 vm03.local ceph-mon[50983]: pgmap v3025: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:02 vm00.local ceph-mon[49980]: pgmap v3026: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:02 vm03.local ceph-mon[50983]: pgmap v3026: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:04.893 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:04.894 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:04.921 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:04.921 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:04 vm00.local ceph-mon[49980]: pgmap v3027: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:04 vm03.local ceph-mon[50983]: pgmap v3027: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:07 vm00.local ceph-mon[49980]: pgmap v3028: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:07 vm03.local ceph-mon[50983]: pgmap v3028: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:09 vm03.local ceph-mon[50983]: pgmap v3029: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:09.923 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:09.923 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:09.950 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:09.951 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:09 vm00.local ceph-mon[49980]: pgmap v3029: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:12.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:11 vm00.local ceph-mon[49980]: pgmap v3030: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:12.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:11 vm03.local ceph-mon[50983]: pgmap v3030: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:12 vm00.local ceph-mon[49980]: pgmap v3031: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:12 vm03.local ceph-mon[50983]: pgmap v3031: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:14.953 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:14.953 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:14.979 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:14.979 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:14 vm00.local ceph-mon[49980]: pgmap v3032: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:14 vm03.local ceph-mon[50983]: pgmap v3032: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:17 vm03.local ceph-mon[50983]: pgmap v3033: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:17 vm00.local ceph-mon[49980]: pgmap v3033: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:19 vm03.local ceph-mon[50983]: pgmap v3034: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:19.980 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:19.981 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:20.006 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:20.007 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:19 vm00.local ceph-mon[49980]: pgmap v3034: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:21.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:21 vm03.local ceph-mon[50983]: pgmap v3035: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:21 vm00.local ceph-mon[49980]: pgmap v3035: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:22.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:56:22.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:56:22.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:56:22.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:56:22.555 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:56:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:56:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:56:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:56:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:56:23.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T06:56:23.984 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:23 vm00.local ceph-mon[49980]: pgmap v3036: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:23 vm03.local ceph-mon[50983]: pgmap v3036: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:25.008 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:25.009 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:24 vm00.local ceph-mon[49980]: pgmap v3037: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:25.034 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:25.035 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:24 vm03.local ceph-mon[50983]: pgmap v3037: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:27 vm03.local ceph-mon[50983]: pgmap v3038: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:27 vm00.local ceph-mon[49980]: pgmap v3038: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:29 vm03.local ceph-mon[50983]: pgmap v3039: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:29 vm00.local ceph-mon[49980]: pgmap v3039: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:30.036 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:30.036 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:30.063 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:30.064 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:31.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:31 vm03.local ceph-mon[50983]: pgmap v3040: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:31 vm00.local ceph-mon[49980]: pgmap v3040: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:33.985 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:33 vm00.local ceph-mon[49980]: pgmap v3041: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:33 vm03.local ceph-mon[50983]: pgmap v3041: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:34 vm00.local ceph-mon[49980]: pgmap v3042: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:34 vm03.local ceph-mon[50983]: pgmap v3042: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:35.065 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:35.065 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:35.091 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:35.092 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:56:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:56:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T06:56:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T06:56:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:36 vm00.local ceph-mon[49980]: pgmap v3043: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:36 vm03.local ceph-mon[50983]: pgmap v3043: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:39.759 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:39 vm03.local ceph-mon[50983]: pgmap v3044: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:39 vm00.local ceph-mon[49980]: pgmap v3044: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:40.093 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:40.094 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:40.120 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:40.121 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:41 vm00.local ceph-mon[49980]: pgmap v3045: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:41 vm03.local ceph-mon[50983]: pgmap v3045: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:43.986 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:43 vm00.local ceph-mon[49980]: pgmap v3046: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:43 vm03.local ceph-mon[50983]: pgmap v3046: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:44 vm00.local ceph-mon[49980]: pgmap v3047: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:44 vm03.local ceph-mon[50983]: pgmap v3047: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:45.122 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:45.122 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:45.149 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:45.149 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:47.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:47 vm03.local ceph-mon[50983]: pgmap v3048: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:47 vm00.local ceph-mon[49980]: pgmap v3048: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:49 vm03.local ceph-mon[50983]: pgmap v3049: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:49 vm00.local ceph-mon[49980]: pgmap v3049: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:50.151 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:50.151 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:50.179 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:50.179 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:51.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:51 vm03.local ceph-mon[50983]: pgmap v3050: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:51 vm00.local ceph-mon[49980]: pgmap v3050: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:53.987 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:53 vm00.local ceph-mon[49980]: pgmap v3051: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:53 vm03.local ceph-mon[50983]: pgmap v3051: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:56:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:54 vm00.local ceph-mon[49980]: pgmap v3052: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:54 vm03.local ceph-mon[50983]: pgmap v3052: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:55.181 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:56:55.181 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:56:55.207 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:56:55.208 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:56:57.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:57 vm03.local ceph-mon[50983]: pgmap v3053: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:57 vm00.local ceph-mon[49980]: pgmap v3053: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:56:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:56:59 vm03.local ceph-mon[50983]: pgmap v3054: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:56:59 vm00.local ceph-mon[49980]: pgmap v3054: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:00.209 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:57:00.210 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:57:00.240 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:57:00.241 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:57:01.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:01 vm03.local ceph-mon[50983]: pgmap v3055: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:01 vm00.local ceph-mon[49980]: pgmap v3055: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:03.989 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:03 vm00.local ceph-mon[49980]: pgmap v3056: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:03 vm03.local ceph-mon[50983]: pgmap v3056: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:04 vm00.local ceph-mon[49980]: pgmap v3057: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:04 vm03.local ceph-mon[50983]: pgmap v3057: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:05.242 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:57:05.243 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:57:05.272 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:57:05.272 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:57:07.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:07 vm03.local ceph-mon[50983]: pgmap v3058: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:07 vm00.local ceph-mon[49980]: pgmap v3058: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:09 vm03.local ceph-mon[50983]: pgmap v3059: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:09 vm00.local ceph-mon[49980]: pgmap v3059: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:10.273 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:57:10.274 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:57:10.300 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:57:10.300 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:57:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:11 vm00.local ceph-mon[49980]: pgmap v3060: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:11 vm03.local ceph-mon[50983]: pgmap v3060: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:13.989 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:13 vm00.local ceph-mon[49980]: pgmap v3061: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:13 vm03.local ceph-mon[50983]: pgmap v3061: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:14 vm00.local ceph-mon[49980]: pgmap v3062: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:14 vm03.local ceph-mon[50983]: pgmap v3062: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:15.302 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:57:15.302 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:57:15.328 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:57:15.329 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:57:17.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:17 vm03.local ceph-mon[50983]: pgmap v3063: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:17 vm00.local ceph-mon[49980]: pgmap v3063: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:19 vm03.local ceph-mon[50983]: pgmap v3064: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:19 vm00.local ceph-mon[49980]: pgmap v3064: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T06:57:20.330 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T06:57:20.331 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T06:57:20.356 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T06:57:20.357 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T06:57:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:21 vm00.local ceph-mon[49980]: pgmap v3065: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:21 vm03.local ceph-mon[50983]: pgmap v3065: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T06:57:22.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:57:22.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T06:57:22.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:22 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T06:57:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T06:57:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:22.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:22 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:23.989 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:23 vm00.local ceph-mon[49980]: pgmap v3066: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:23.990 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:57:23.990 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:57:23.990 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:57:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:23 vm03.local ceph-mon[50983]: pgmap v3066: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:57:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:57:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:57:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:24 vm00.local ceph-mon[49980]: pgmap v3067: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:24 vm03.local ceph-mon[50983]: pgmap v3067: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:25.358 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:25.359 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:25.387 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:25.387 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:27.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:27 vm03.local ceph-mon[50983]: pgmap v3068: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:27 vm00.local ceph-mon[49980]: pgmap v3068: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:29 vm03.local ceph-mon[50983]: pgmap v3069: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 
op/s 2026-03-10T06:57:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:29 vm00.local ceph-mon[49980]: pgmap v3069: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:30.389 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:30.389 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:30.416 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:30.417 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:31 vm00.local ceph-mon[49980]: pgmap v3070: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:31 vm03.local ceph-mon[50983]: pgmap v3070: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:33.990 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:33 vm00.local ceph-mon[49980]: pgmap v3071: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:33 vm03.local ceph-mon[50983]: pgmap v3071: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:34 vm00.local ceph-mon[49980]: pgmap v3072: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:34 vm03.local ceph-mon[50983]: pgmap v3072: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:35.419 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:35.419 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:35.447 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:35.448 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:57:36.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:57:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:57:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 
2026-03-10T06:57:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:36 vm00.local ceph-mon[49980]: pgmap v3073: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:36 vm03.local ceph-mon[50983]: pgmap v3073: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:39 vm03.local ceph-mon[50983]: pgmap v3074: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:39 vm00.local ceph-mon[49980]: pgmap v3074: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:40.449 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:40.449 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:40.475 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:40.476 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:41 vm00.local ceph-mon[49980]: pgmap v3075: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:41 vm03.local ceph-mon[50983]: pgmap v3075: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:43.991 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:43 vm00.local ceph-mon[49980]: pgmap v3076: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:43 vm03.local ceph-mon[50983]: pgmap v3076: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:44 vm00.local ceph-mon[49980]: pgmap v3077: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:44 vm03.local ceph-mon[50983]: pgmap v3077: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:45.477 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:45.478 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:45.506 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:45.507 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:47 vm00.local ceph-mon[49980]: pgmap v3078: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:47 vm03.local ceph-mon[50983]: pgmap v3078: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:49.806 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:49 vm03.local ceph-mon[50983]: pgmap v3079: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:49 vm00.local ceph-mon[49980]: pgmap v3079: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:50.508 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:50.508 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:50.535 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:50.535 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:51 vm00.local ceph-mon[49980]: pgmap v3080: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:51 vm03.local ceph-mon[50983]: pgmap v3080: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:53.992 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:53 vm00.local ceph-mon[49980]: pgmap v3081: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:53 vm03.local ceph-mon[50983]: pgmap v3081: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:57:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:54 vm00.local ceph-mon[49980]: pgmap v3082: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:54 vm03.local ceph-mon[50983]: pgmap v3082: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:55.537 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:57:55.537 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:57:55.563 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:57:55.563 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:57:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:57 vm00.local ceph-mon[49980]: pgmap v3083: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:57 vm03.local ceph-mon[50983]: pgmap v3083: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:57:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:57:59 vm03.local ceph-mon[50983]: pgmap v3084: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:57:59 vm00.local ceph-mon[49980]: pgmap v3084: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:00.565 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:00.565 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:00.592 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:00.593 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:01 vm00.local ceph-mon[49980]: pgmap v3085: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:01 vm03.local ceph-mon[50983]: pgmap v3085: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:03.993 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:03 vm00.local ceph-mon[49980]: pgmap v3086: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:03 vm03.local ceph-mon[50983]: pgmap v3086: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:05.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:04 vm00.local ceph-mon[49980]: pgmap v3087: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:04 vm03.local ceph-mon[50983]: pgmap v3087: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:05.594 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:05.595 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:05.623 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:05.623 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:07 vm00.local ceph-mon[49980]: pgmap v3088: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:07 vm03.local ceph-mon[50983]: pgmap v3088: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:09 vm03.local ceph-mon[50983]: pgmap v3089: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:09 vm00.local ceph-mon[49980]: pgmap v3089: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:10.625 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:10.625 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:10.655 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:10.656 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:11 vm00.local ceph-mon[49980]: pgmap v3090: 97 pgs: 97 active+clean; 453 KiB 
data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:11 vm03.local ceph-mon[50983]: pgmap v3090: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:13.994 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:13 vm00.local ceph-mon[49980]: pgmap v3091: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:13 vm03.local ceph-mon[50983]: pgmap v3091: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:15.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:14 vm00.local ceph-mon[49980]: pgmap v3092: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:14 vm03.local ceph-mon[50983]: pgmap v3092: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:15.658 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:15.658 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:15.686 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:15.686 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:17 vm00.local ceph-mon[49980]: pgmap v3093: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:17 vm03.local ceph-mon[50983]: pgmap v3093: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:19 vm03.local ceph-mon[50983]: pgmap v3094: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:19 vm00.local ceph-mon[49980]: pgmap v3094: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:20.687 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:20.688 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:20.714 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:20.714 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:21 vm00.local ceph-mon[49980]: pgmap v3095: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:21 vm03.local ceph-mon[50983]: pgmap v3095: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: pgmap v3096: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:58:23.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: pgmap v3096: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:58:23.995 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:58:25.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:24 vm00.local ceph-mon[49980]: pgmap v3097: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:25.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:24 vm03.local ceph-mon[50983]: pgmap v3097: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:25.716 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:25.716 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:25.743 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:25.743 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:27 vm00.local ceph-mon[49980]: pgmap v3098: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:27 vm03.local ceph-mon[50983]: pgmap v3098: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:29 vm03.local ceph-mon[50983]: pgmap v3099: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:29 vm00.local ceph-mon[49980]: pgmap v3099: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:30.744 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:30.745 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:30.772 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:30.772 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:31 vm00.local ceph-mon[49980]: pgmap v3100: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:31 vm03.local ceph-mon[50983]: pgmap v3100: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:33.996 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:33 vm00.local ceph-mon[49980]: pgmap v3101: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:33 vm03.local ceph-mon[50983]: pgmap v3101: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:35.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:34 vm00.local ceph-mon[49980]: pgmap v3102: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:34 vm03.local ceph-mon[50983]: pgmap v3102: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:35.774 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:35.774 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:35.804 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:35.805 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:58:36.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:58:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:58:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:58:37.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:36 vm00.local ceph-mon[49980]: pgmap v3103: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:36 vm03.local ceph-mon[50983]: pgmap v3103: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:39.758 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:39 vm03.local ceph-mon[50983]: pgmap v3104: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:39 vm00.local ceph-mon[49980]: pgmap v3104: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:40.807 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:40.807 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:40.835 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:40.836 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:41 vm00.local ceph-mon[49980]: pgmap v3105: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:41 vm03.local ceph-mon[50983]: pgmap v3105: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:43.997 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:43 vm00.local ceph-mon[49980]: pgmap v3106: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:43 vm03.local ceph-mon[50983]: pgmap v3106: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-10T06:58:45.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:44 vm00.local ceph-mon[49980]: pgmap v3107: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:44 vm03.local ceph-mon[50983]: pgmap v3107: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:45.838 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:45.838 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:47 vm00.local ceph-mon[49980]: pgmap v3108: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:47 vm03.local ceph-mon[50983]: pgmap v3108: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:49 vm03.local ceph-mon[50983]: pgmap v3109: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:49 vm00.local ceph-mon[49980]: pgmap v3109: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:50.867 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:50.868 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:50.897 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:50.897 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:52.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:51 vm00.local ceph-mon[49980]: pgmap v3110: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:51 vm03.local ceph-mon[50983]: pgmap v3110: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:53.998 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:53 vm00.local ceph-mon[49980]: pgmap v3111: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:53 vm03.local ceph-mon[50983]: pgmap v3111: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:58:55.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:54 vm00.local ceph-mon[49980]: pgmap v3112: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:54 vm03.local ceph-mon[50983]: pgmap v3112: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:55.899 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:58:55.899 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:58:55.929 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:58:55.929 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:58:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:57 vm00.local ceph-mon[49980]: pgmap v3113: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:57 vm03.local ceph-mon[50983]: pgmap v3113: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:58:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:58:59 vm03.local ceph-mon[50983]: pgmap v3114: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:58:59 vm00.local ceph-mon[49980]: pgmap v3114: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:00.931 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:00.931 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:00.985 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:00.986 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:01 vm00.local ceph-mon[49980]: pgmap v3115: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:01 vm03.local ceph-mon[50983]: pgmap v3115: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:03.999 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:03 vm00.local ceph-mon[49980]: pgmap v3116: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:03 vm03.local ceph-mon[50983]: pgmap v3116: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:04 vm00.local ceph-mon[49980]: pgmap v3117: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:04 vm03.local ceph-mon[50983]: pgmap v3117: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:05.988 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:05.988 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:06.017 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:06.018 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:07 vm00.local ceph-mon[49980]: pgmap v3118: 97 pgs: 97 active+clean; 453 KiB 
data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:07 vm03.local ceph-mon[50983]: pgmap v3118: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:09 vm03.local ceph-mon[50983]: pgmap v3119: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:09 vm00.local ceph-mon[49980]: pgmap v3119: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:11.019 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:11.020 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:11.046 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:11.047 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:11 vm00.local ceph-mon[49980]: pgmap v3120: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:11 vm03.local ceph-mon[50983]: pgmap v3120: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:14.000 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:13 vm00.local ceph-mon[49980]: pgmap v3121: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:13 vm03.local ceph-mon[50983]: pgmap v3121: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:14 vm03.local ceph-mon[50983]: pgmap v3122: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:14 vm00.local ceph-mon[49980]: pgmap v3122: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:16.048 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:16.049 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:16.074 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:16.074 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:17 vm00.local ceph-mon[49980]: pgmap v3123: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:17 vm03.local ceph-mon[50983]: pgmap v3123: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:19 vm03.local ceph-mon[50983]: pgmap v3124: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB 
avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:19 vm00.local ceph-mon[49980]: pgmap v3124: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:21.075 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:21.076 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:21.101 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:21.102 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:21 vm00.local ceph-mon[49980]: pgmap v3125: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:21 vm03.local ceph-mon[50983]: pgmap v3125: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:23.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:23 vm03.local ceph-mon[50983]: pgmap v3126: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:23.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:59:23.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:59:23.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:23 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:59:23.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:23 vm00.local ceph-mon[49980]: pgmap v3126: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:23.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:59:23.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:59:23.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:23 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:59:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:25 vm00.local ceph-mon[49980]: pgmap v3127: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:25 vm00.local ceph-mon[49980]: 
from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:59:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.781 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:25 vm03.local ceph-mon[50983]: pgmap v3127: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:59:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T06:59:26.103 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:26.104 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:26.130 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:26.131 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:27 vm00.local ceph-mon[49980]: pgmap v3128: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:27 vm03.local ceph-mon[50983]: pgmap v3128: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:29 vm03.local ceph-mon[50983]: pgmap v3129: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:29 vm00.local ceph-mon[49980]: pgmap v3129: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:31.132 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:31.133 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:31.160 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:31.161 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:31 vm00.local ceph-mon[49980]: pgmap v3130: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:31 vm03.local ceph-mon[50983]: pgmap v3130: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:34.002 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:33 vm00.local ceph-mon[49980]: pgmap v3131: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:33 vm03.local ceph-mon[50983]: pgmap v3131: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:34 vm03.local ceph-mon[50983]: pgmap v3132: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:34 vm00.local ceph-mon[49980]: pgmap v3132: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:59:36.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:59:36.163 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:36.163 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:36.189 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:36.190 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:59:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T06:59:37.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:36 vm03.local ceph-mon[50983]: pgmap v3133: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:36 vm00.local ceph-mon[49980]: pgmap v3133: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:39.759 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:39 vm03.local ceph-mon[50983]: pgmap 
v3134: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:39 vm00.local ceph-mon[49980]: pgmap v3134: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:41.191 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:41.191 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:41.218 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:41.218 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:41 vm00.local ceph-mon[49980]: pgmap v3135: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:41 vm03.local ceph-mon[50983]: pgmap v3135: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:43.799 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:43 vm00.local ceph-mon[49980]: pgmap v3136: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:43 vm03.local ceph-mon[50983]: pgmap v3136: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:44 vm03.local ceph-mon[50983]: pgmap v3137: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:44 vm00.local ceph-mon[49980]: pgmap v3137: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:46.220 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:46.220 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:46.248 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:46.248 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:47 vm00.local ceph-mon[49980]: pgmap v3138: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:47 vm03.local ceph-mon[50983]: pgmap v3138: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:49 vm03.local ceph-mon[50983]: pgmap v3139: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:49 vm00.local ceph-mon[49980]: pgmap v3139: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:51.250 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:51.251 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t 
nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:51.278 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:51.279 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:51 vm00.local ceph-mon[49980]: pgmap v3140: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:51 vm03.local ceph-mon[50983]: pgmap v3140: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:53.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:52 vm03.local ceph-mon[50983]: pgmap v3141: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:52 vm00.local ceph-mon[49980]: pgmap v3141: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T06:59:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:54 vm00.local ceph-mon[49980]: pgmap v3142: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:54 vm03.local ceph-mon[50983]: pgmap v3142: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:56.280 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T06:59:56.281 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T06:59:56.308 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T06:59:56.308 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T06:59:58.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:57 vm00.local ceph-mon[49980]: pgmap v3143: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:57 vm03.local ceph-mon[50983]: pgmap v3143: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T06:59:59.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 06:59:59 vm03.local ceph-mon[50983]: pgmap v3144: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 06:59:59 vm00.local ceph-mon[49980]: pgmap v3144: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:01.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:00 vm00.local ceph-mon[49980]: overall HEALTH_OK 2026-03-10T07:00:01.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:00 vm03.local ceph-mon[50983]: overall HEALTH_OK 2026-03-10T07:00:01.310 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:01.311 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:01.338 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:01.339 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:02.030 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:01 vm00.local ceph-mon[49980]: pgmap v3145: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:01 vm03.local ceph-mon[50983]: pgmap v3145: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:04.004 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:03 vm00.local ceph-mon[49980]: pgmap v3146: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:03 vm03.local ceph-mon[50983]: pgmap v3146: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:05.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:04 vm03.local ceph-mon[50983]: pgmap v3147: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:04 vm00.local ceph-mon[49980]: pgmap v3147: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:06.341 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:06.341 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:06.367 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:06.367 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:07 vm00.local ceph-mon[49980]: pgmap v3148: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:07 vm03.local ceph-mon[50983]: pgmap v3148: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:09.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:09 vm03.local ceph-mon[50983]: pgmap v3149: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:09 vm00.local ceph-mon[49980]: pgmap v3149: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:11.369 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:11.369 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:11.395 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:11.396 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:11 vm00.local ceph-mon[49980]: pgmap v3150: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:11 vm03.local ceph-mon[50983]: pgmap v3150: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:13.809 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:13 vm00.local ceph-mon[49980]: pgmap v3151: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:13 vm03.local ceph-mon[50983]: pgmap v3151: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:15.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:14 vm03.local ceph-mon[50983]: pgmap v3152: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:14 vm00.local ceph-mon[49980]: pgmap v3152: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:16.397 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:16.398 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:16.424 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:16.425 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:17 vm00.local ceph-mon[49980]: pgmap v3153: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:17 vm03.local ceph-mon[50983]: pgmap v3153: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:19 vm03.local ceph-mon[50983]: pgmap v3154: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:19 vm00.local ceph-mon[49980]: pgmap v3154: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:21.426 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:21.427 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:21.453 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:21.454 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:21 vm00.local ceph-mon[49980]: pgmap v3155: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:21 vm03.local ceph-mon[50983]: pgmap v3155: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:23.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:23 vm00.local ceph-mon[49980]: pgmap v3156: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:23 vm03.local ceph-mon[50983]: pgmap v3156: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:25.056 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:24 vm03.local ceph-mon[50983]: pgmap v3157: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:25.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:24 vm00.local ceph-mon[49980]: pgmap v3157: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:00:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:00:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:00:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:00:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:00:26.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:00:26.455 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:26.456 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:26.485 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:26.486 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:26 vm00.local ceph-mon[49980]: pgmap v3158: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:26 vm03.local ceph-mon[50983]: pgmap v3158: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:29.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:29 vm03.local 
ceph-mon[50983]: pgmap v3159: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:29 vm00.local ceph-mon[49980]: pgmap v3159: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:31.487 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:31.488 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:31.560 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:31.560 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:31 vm00.local ceph-mon[49980]: pgmap v3160: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:31 vm03.local ceph-mon[50983]: pgmap v3160: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:34.007 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:33 vm00.local ceph-mon[49980]: pgmap v3161: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:33 vm03.local ceph-mon[50983]: pgmap v3161: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:35.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:34 vm03.local ceph-mon[50983]: pgmap v3162: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:34 vm00.local ceph-mon[49980]: pgmap v3162: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:00:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T07:00:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:00:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T07:00:36.561 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:36.562 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:36.587 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:36.588 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:36 vm00.local ceph-mon[49980]: pgmap v3163: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:36 vm03.local ceph-mon[50983]: pgmap v3163: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:39 vm03.local ceph-mon[50983]: pgmap v3164: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:39 vm00.local ceph-mon[49980]: pgmap v3164: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:41.589 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:41.590 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:41.617 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:41.617 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:41 vm00.local ceph-mon[49980]: pgmap v3165: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:41 vm03.local ceph-mon[50983]: pgmap v3165: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:44.008 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:43 vm00.local ceph-mon[49980]: pgmap v3166: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:43 vm03.local ceph-mon[50983]: pgmap v3166: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:45.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:44 vm03.local ceph-mon[50983]: pgmap v3167: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:44 vm00.local ceph-mon[49980]: pgmap v3167: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:46.619 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:46.619 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:46.646 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:46.646 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:47 vm00.local ceph-mon[49980]: pgmap v3168: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:47 vm03.local ceph-mon[50983]: pgmap 
v3168: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:49.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:49 vm03.local ceph-mon[50983]: pgmap v3169: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:49 vm00.local ceph-mon[49980]: pgmap v3169: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:51.647 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:51.648 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:51.674 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:51.675 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:51 vm00.local ceph-mon[49980]: pgmap v3170: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:51 vm03.local ceph-mon[50983]: pgmap v3170: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:54.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:53 vm00.local ceph-mon[49980]: pgmap v3171: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:53 vm03.local ceph-mon[50983]: pgmap v3171: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:00:55.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:54 vm03.local ceph-mon[50983]: pgmap v3172: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:54 vm00.local ceph-mon[49980]: pgmap v3172: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:56.676 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:00:56.676 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:00:57.009 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:00:57.009 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:00:57.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:57 vm00.local ceph-mon[49980]: pgmap v3173: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:00:57.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:57 vm03.local ceph-mon[50983]: pgmap v3173: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:00:59 vm00.local ceph-mon[49980]: pgmap v3174: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:00:59 vm03.local ceph-mon[50983]: pgmap v3174: 97 pgs: 97 active+clean; 453 KiB 
data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:02.011 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:02.012 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:01 vm00.local ceph-mon[49980]: pgmap v3175: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:02.037 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:02.038 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:01 vm03.local ceph-mon[50983]: pgmap v3175: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:04.010 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:03 vm00.local ceph-mon[49980]: pgmap v3176: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:03 vm03.local ceph-mon[50983]: pgmap v3176: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:04 vm00.local ceph-mon[49980]: pgmap v3177: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:04 vm03.local ceph-mon[50983]: pgmap v3177: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:07.039 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:07.040 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:07.067 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:07.067 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:07 vm00.local ceph-mon[49980]: pgmap v3178: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:07 vm03.local ceph-mon[50983]: pgmap v3178: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:09 vm00.local ceph-mon[49980]: pgmap v3179: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:09 vm03.local ceph-mon[50983]: pgmap v3179: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:11 vm00.local ceph-mon[49980]: pgmap v3180: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:11 vm03.local ceph-mon[50983]: pgmap v3180: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:12.068 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:12.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:12.096 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:12.097 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:14.011 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:13 vm00.local ceph-mon[49980]: pgmap v3181: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:13 vm03.local ceph-mon[50983]: pgmap v3181: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:14 vm00.local ceph-mon[49980]: pgmap v3182: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:14 vm03.local ceph-mon[50983]: pgmap v3182: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:17.098 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:17.099 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:17.126 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:17.126 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:17 vm00.local ceph-mon[49980]: pgmap v3183: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:17 vm03.local ceph-mon[50983]: pgmap v3183: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:19.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:19 vm03.local ceph-mon[50983]: pgmap v3184: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:19 vm00.local ceph-mon[49980]: pgmap v3184: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:21 vm00.local ceph-mon[49980]: pgmap v3185: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:21 vm03.local ceph-mon[50983]: pgmap v3185: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:22.128 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:22.128 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:22.350 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:22.350 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:22 vm00.local 
ceph-mon[49980]: pgmap v3186: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:22 vm03.local ceph-mon[50983]: pgmap v3186: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:24 vm00.local ceph-mon[49980]: pgmap v3187: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:24 vm03.local ceph-mon[50983]: pgmap v3187: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:01:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:01:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:01:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:01:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:01:26.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:01:26.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:01:26.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:01:26.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:01:26.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:01:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:26 vm00.local ceph-mon[49980]: pgmap v3188: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:26 vm03.local ceph-mon[50983]: pgmap v3188: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:27.352 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 
2026-03-10T07:01:27.352 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:27.380 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:27.380 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:29 vm00.local ceph-mon[49980]: pgmap v3189: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:30.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:29 vm03.local ceph-mon[50983]: pgmap v3189: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:31 vm00.local ceph-mon[49980]: pgmap v3190: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:31 vm03.local ceph-mon[50983]: pgmap v3190: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:32.382 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:32.382 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:32.441 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:32.442 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:32 vm00.local ceph-mon[49980]: pgmap v3191: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:32 vm03.local ceph-mon[50983]: pgmap v3191: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:35.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:34 vm00.local ceph-mon[49980]: pgmap v3192: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:35.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:34 vm03.local ceph-mon[50983]: pgmap v3192: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:01:36.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T07:01:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:01:36.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' 
entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T07:01:37.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:36 vm00.local ceph-mon[49980]: pgmap v3193: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:37.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:36 vm03.local ceph-mon[50983]: pgmap v3193: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:37.443 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:37.444 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:37.470 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:37.471 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:39.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:39 vm03.local ceph-mon[50983]: pgmap v3194: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:39 vm00.local ceph-mon[49980]: pgmap v3194: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:01:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:41 vm00.local ceph-mon[49980]: pgmap v3195: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:41 vm03.local ceph-mon[50983]: pgmap v3195: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:42.472 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:42.473 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:42.500 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:42.500 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:43.828 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:43 vm00.local ceph-mon[49980]: pgmap v3196: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:43 vm03.local ceph-mon[50983]: pgmap v3196: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:45.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:44 vm00.local ceph-mon[49980]: pgmap v3197: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T07:01:45.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:44 vm03.local ceph-mon[50983]: pgmap v3197: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T07:01:47.502 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:47.503 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:47.531 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:47.532 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 
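
The repeating stderr sequence above — "++ hostname", "+ mount -t nfs vm00.local:/fake /mnt/foo -o sync", "mount.nfs: mount system call failed", "+ sleep 5" — is bash xtrace output from a mount retry loop running on the client host. A minimal reconstruction of that loop, inferred only from the trace (the script itself is not shown in this excerpt; the export path, mount point, and -o sync option are copied verbatim from the trace):

    # Reconstructed retry loop (sketch, inferred from the xtrace above).
    # Under `set -x`, the $(hostname) command substitution prints as
    # "++ hostname" and each attempt prints as "+ mount ...";
    # "mount.nfs: mount system call failed" is mount's own stderr, and
    # "+ sleep 5" is the back-off between attempts.
    set -x
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        sleep 5   # retry every 5 seconds until the NFS export is mountable
    done

Each failed iteration accounts for one ~5-second gap between consecutive "+ mount" entries; in this excerpt every attempt fails with "mount system call failed" while the cluster itself stays HEALTH_OK, so the loop simply keeps spinning until the server side accepts the mount.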
2026-03-10T07:01:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:47 vm00.local ceph-mon[49980]: pgmap v3198: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T07:01:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:47 vm03.local ceph-mon[50983]: pgmap v3198: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T07:01:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:49 vm00.local ceph-mon[49980]: pgmap v3199: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:49 vm03.local ceph-mon[50983]: pgmap v3199: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:51 vm00.local ceph-mon[49980]: pgmap v3200: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T07:01:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:51 vm03.local ceph-mon[50983]: pgmap v3200: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T07:01:52.533 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:52.534 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:52.561 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:52.562 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:53.978 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:53 vm00.local ceph-mon[49980]: pgmap v3201: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:53 vm03.local ceph-mon[50983]: pgmap v3201: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:54 vm00.local ceph-mon[49980]: pgmap v3202: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:55.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:54 vm03.local ceph-mon[50983]: pgmap v3202: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:57.563 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:01:57.564 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:01:57.590 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:01:57.591 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:01:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:57 vm00.local ceph-mon[49980]: pgmap v3203: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:01:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:57 vm03.local ceph-mon[50983]: pgmap v3203: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:00.030 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:01:59 vm00.local ceph-mon[49980]: pgmap v3204: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:01:59 vm03.local ceph-mon[50983]: pgmap v3204: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:01 vm00.local ceph-mon[49980]: pgmap v3205: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:01 vm03.local ceph-mon[50983]: pgmap v3205: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:02.592 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:02.593 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:02.619 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:02.620 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:04.015 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:03 vm00.local ceph-mon[49980]: pgmap v3206: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:03 vm03.local ceph-mon[50983]: pgmap v3206: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:05.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:04 vm00.local ceph-mon[49980]: pgmap v3207: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:05.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:04 vm03.local ceph-mon[50983]: pgmap v3207: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:07.622 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:07.622 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:07.650 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:07.651 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:07 vm00.local ceph-mon[49980]: pgmap v3208: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:07 vm03.local ceph-mon[50983]: pgmap v3208: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:09 vm00.local ceph-mon[49980]: pgmap v3209: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:10.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:09 vm03.local ceph-mon[50983]: pgmap v3209: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:12.030 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:11 vm00.local ceph-mon[49980]: pgmap v3210: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:11 vm03.local ceph-mon[50983]: pgmap v3210: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:12.652 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:12.653 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:12.679 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:12.679 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:14.017 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:13 vm00.local ceph-mon[49980]: pgmap v3211: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:13 vm03.local ceph-mon[50983]: pgmap v3211: 97 pgs: 97 active+clean; 453 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:15.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:14 vm00.local ceph-mon[49980]: pgmap v3212: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:15.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:14 vm03.local ceph-mon[50983]: pgmap v3212: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:17.680 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:17.681 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:17.706 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:17.707 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:17 vm00.local ceph-mon[49980]: pgmap v3213: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:17 vm03.local ceph-mon[50983]: pgmap v3213: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:19 vm00.local ceph-mon[49980]: pgmap v3214: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:19 vm03.local ceph-mon[50983]: pgmap v3214: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:21 vm00.local ceph-mon[49980]: pgmap v3215: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:21 vm03.local ceph-mon[50983]: pgmap v3215: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:22.708 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:22.709 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:22.735 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:22.736 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:24.017 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:23 vm00.local ceph-mon[49980]: pgmap v3216: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:23 vm03.local ceph-mon[50983]: pgmap v3216: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:25.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:24 vm00.local ceph-mon[49980]: pgmap v3217: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:25.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:24 vm03.local ceph-mon[50983]: pgmap v3217: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:25.926 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:02:25.927 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:25.927 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:25.927 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:02:25.927 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:25 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:02:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:02:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:02:26.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:25 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' 2026-03-10T07:02:27.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:26 vm00.local 
ceph-mon[49980]: pgmap v3218: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:27.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:26 vm03.local ceph-mon[50983]: pgmap v3218: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:27.738 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:27.738 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:27.766 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:27.767 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:29 vm00.local ceph-mon[49980]: pgmap v3219: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:30.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:29 vm03.local ceph-mon[50983]: pgmap v3219: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:30 vm00.local ceph-mon[49980]: pgmap v3220: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:30 vm03.local ceph-mon[50983]: pgmap v3220: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:32.769 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:02:32.769 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:02:32.794 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:02:32.795 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:02:34.017 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:33 vm00.local ceph-mon[49980]: pgmap v3221: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:33 vm03.local ceph-mon[50983]: pgmap v3221: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:02:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:35 vm00.local ceph-mon[49980]: pgmap v3222: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:35 vm03.local ceph-mon[50983]: pgmap v3222: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:02:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:02:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 
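
Two periodic background patterns are interleaved with the retry loop throughout this stretch, and neither is related to the mount failures: the paired cmd=[{"prefix":"config rm",...}] audit entries for mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule and .../trash_purge_schedule (apparently the rbd_support mgr module clearing its per-instance schedule keys, once per minute around hh:mm:35), and the "config dump" / "config generate-minimal-conf" / "auth get client.admin" batches (apparently the cephadm module's periodic refresh, once per minute around hh:mm:25). A hypothetical one-liner for separating the mount attempts from this chatter — assuming the on-disk job log has one entry per line, as teuthology writes it, and "teuthology.log" is an assumed file name:

    # List the wall-clock time of every mount attempt, to check the ~5 s
    # retry cadence against the once-per-minute mgr/cephadm beats.
    grep -oE '^2026-03-10T[0-9:.]+ INFO:teuthology\.orchestra\.run\.vm00\.stderr:\+ mount -t nfs' teuthology.log \
        | cut -d' ' -f1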
2026-03-10T07:02:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:02:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:02:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[... 2026-03-10T07:02:37 through 2026-03-10T07:05:34 the cycle above repeats with only timestamps and pgmap versions advancing: the vm00 retry loop (++ hostname; + mount -t nfs vm00.local:/fake /mnt/foo -o sync; mount.nfs: mount system call failed; + sleep 5) fails every 5 s, last attempt at 07:05:33; both mons report pgmap v3223 through v3311 at ~2 s intervals, always 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; the mgr beacon (config dump, config generate-minimal-conf, auth get client.admin) dispatches each minute at :26-:27 and the rbd_support config rm pair (mirror_snapshot_schedule, trash_purge_schedule) at :36; the section ends mid-record in the pgmap v3311 line ...]
B/s wr, 0 op/s 2026-03-10T07:05:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:33 vm03.local ceph-mon[50983]: pgmap v3311: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:35 vm00.local ceph-mon[49980]: pgmap v3312: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:35 vm03.local ceph-mon[50983]: pgmap v3312: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:05:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T07:05:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:05:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch 2026-03-10T07:05:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:37 vm00.local ceph-mon[49980]: pgmap v3313: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:37 vm03.local ceph-mon[50983]: pgmap v3313: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:38.865 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:05:38.865 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:05:38.891 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:05:38.892 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:05:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:39 vm00.local ceph-mon[49980]: pgmap v3314: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:39 vm03.local ceph-mon[50983]: pgmap v3314: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:41 vm00.local ceph-mon[49980]: pgmap v3315: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:41 vm03.local ceph-mon[50983]: pgmap v3315: 97 pgs: 97 active+clean; 453 
KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:43.893 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:05:43.894 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:05:43.921 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:05:43.922 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:05:44.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:43 vm00.local ceph-mon[49980]: pgmap v3316: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:43 vm03.local ceph-mon[50983]: pgmap v3316: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:45 vm00.local ceph-mon[49980]: pgmap v3317: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:45 vm03.local ceph-mon[50983]: pgmap v3317: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:47 vm00.local ceph-mon[49980]: pgmap v3318: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:47 vm03.local ceph-mon[50983]: pgmap v3318: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:48.924 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:05:48.924 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:05:48.952 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:05:48.952 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:05:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:49 vm00.local ceph-mon[49980]: pgmap v3319: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:49 vm03.local ceph-mon[50983]: pgmap v3319: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:05:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:51 vm00.local ceph-mon[49980]: pgmap v3320: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:05:51 vm03.local ceph-mon[50983]: pgmap v3320: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:05:53.954 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:05:53.954 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:05:53.981 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:05:53.981 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:05:54.030 
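The identical xtrace fragments above (`++ hostname`, `+ mount ...`, `+ sleep 5`) suggest the client is sitting in a single retry loop rather than issuing independent mount commands. A minimal sketch of the loop those fragments trace, reconstructed from the log output alone (the export path /fake and mount point /mnt/foo are taken from the log; the while/until structure is inferred):

    #!/usr/bin/env bash
    # Reconstruction of the retry loop implied by the repeated xtrace lines.
    # Under `set -x`, the command substitution prints "++ hostname" and the
    # mount itself prints "+ mount -t nfs vm00.local:/fake /mnt/foo -o sync",
    # matching the log; each failure is followed by "+ sleep 5", which
    # matches the ~5 s cadence of the entries above.
    set -x
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        sleep 5
    done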
2026-03-10T07:05:54.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:05:53 vm00.local ceph-mon[49980]: pgmap v3321: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
[... the pattern repeats unchanged from 07:05:54 to 07:07:27: pgmap heartbeats v3321-v3367 from both mons hold at 97 pgs active+clean, 453 KiB data, 88 MiB used, and the mount attempt fails every ~5 s (07:05:58 through 07:07:24) with "mount.nfs: mount system call failed"; the mgr repeats its config dump / config generate-minimal-conf / auth get cycle at 07:06:27 and the two rbd_support "config rm" dispatches at 07:06:36, each mirrored on both mons ...]
2026-03-10T07:07:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:27 vm00.local ceph-mon[49980]: pgmap v3368: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:07:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:07:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:07:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... three further bare from='mgr.14214 ...' entries follow on vm00, and vm03 mirrors the whole block at 07:07:28.056; the mount attempts at 07:07:29 and 07:07:34 fail identically; pgmap heartbeats v3369-v3372 continue unchanged ...]
2026-03-10T07:07:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:07:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
[... vm03 mirrors both dispatches at 07:07:36; pgmap v3373 heartbeats at 07:07:37 are unchanged ...]
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:07:39.611 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:07:39.637 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:07:39.638 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:07:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:39 vm00.local ceph-mon[49980]: pgmap v3374: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:39 vm03.local ceph-mon[50983]: pgmap v3374: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:41 vm00.local ceph-mon[49980]: pgmap v3375: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:41 vm03.local ceph-mon[50983]: pgmap v3375: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:44.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:43 vm00.local ceph-mon[49980]: pgmap v3376: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:43 vm03.local ceph-mon[50983]: pgmap v3376: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:44.640 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:07:44.641 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:07:44.671 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:07:44.672 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:07:45.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:45 vm00.local ceph-mon[49980]: pgmap v3377: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:45 vm03.local ceph-mon[50983]: pgmap v3377: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:47 vm00.local ceph-mon[49980]: pgmap v3378: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:47 vm03.local ceph-mon[50983]: pgmap v3378: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:49.673 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:07:49.674 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:07:49.700 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:07:49.700 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:07:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:49 vm00.local ceph-mon[49980]: pgmap v3379: 97 pgs: 97 active+clean; 453 KiB 
data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:49 vm03.local ceph-mon[50983]: pgmap v3379: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:51 vm00.local ceph-mon[49980]: pgmap v3380: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:51 vm03.local ceph-mon[50983]: pgmap v3380: 97 pgs: 97 active+clean; 453 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:54.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:53 vm00.local ceph-mon[49980]: pgmap v3381: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:53 vm03.local ceph-mon[50983]: pgmap v3381: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:07:54.702 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:07:54.703 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:07:54.730 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:07:54.731 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:07:55.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:55 vm00.local ceph-mon[49980]: pgmap v3382: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:55 vm03.local ceph-mon[50983]: pgmap v3382: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:57 vm00.local ceph-mon[49980]: pgmap v3383: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:57 vm03.local ceph-mon[50983]: pgmap v3383: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:07:59.732 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:07:59.733 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:07:59.762 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:07:59.763 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:08:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:07:59 vm00.local ceph-mon[49980]: pgmap v3384: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:08:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:07:59 vm03.local ceph-mon[50983]: pgmap v3384: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:08:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:01 vm00.local ceph-mon[49980]: pgmap v3385: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:01 vm03.local ceph-mon[50983]: pgmap v3385: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:02 vm00.local ceph-mon[49980]: pgmap v3386: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:08:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:02 vm03.local ceph-mon[50983]: pgmap v3386: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:08:04.764 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:08:04.765 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:08:04.793 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:08:04.794 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:08:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:05 vm00.local ceph-mon[49980]: pgmap v3387: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:05 vm03.local ceph-mon[50983]: pgmap v3387: 97 pgs: 97 active+clean; 453 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:07 vm00.local ceph-mon[49980]: pgmap v3388: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:07 vm03.local ceph-mon[50983]: pgmap v3388: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:09.795 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-10T07:08:09.796 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-10T07:08:09.824 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-10T07:08:09.824 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-10T07:08:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:09 vm00.local ceph-mon[49980]: pgmap v3389: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:08:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:09 vm03.local ceph-mon[50983]: pgmap v3389: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T07:08:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:11 vm00.local ceph-mon[49980]: pgmap v3390: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:11 vm03.local ceph-mon[50983]: pgmap v3390: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T07:08:14.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:13 vm00.local ceph-mon[49980]: pgmap v3391: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-10T07:08:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:13 vm03.local ceph-mon[50983]: pgmap v3391: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:14.825 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:14.826 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:14.859 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:14.859 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:15 vm00.local ceph-mon[49980]: pgmap v3392: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:15 vm03.local ceph-mon[50983]: pgmap v3392: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:17 vm00.local ceph-mon[49980]: pgmap v3393: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:17 vm03.local ceph-mon[50983]: pgmap v3393: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:19.861 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:19.862 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:19.889 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:19.889 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:19 vm00.local ceph-mon[49980]: pgmap v3394: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:19 vm03.local ceph-mon[50983]: pgmap v3394: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:21 vm00.local ceph-mon[49980]: pgmap v3395: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:21 vm03.local ceph-mon[50983]: pgmap v3395: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:24.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:23 vm00.local ceph-mon[49980]: pgmap v3396: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:23 vm03.local ceph-mon[50983]: pgmap v3396: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:24.890 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:24.891 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:24.916 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:24.916 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:25.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:25 vm00.local ceph-mon[49980]: pgmap v3397: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:25.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:25 vm03.local ceph-mon[50983]: pgmap v3397: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:27.689 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:27 vm00.local ceph-mon[49980]: pgmap v3398: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:27.690 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:27 vm03.local ceph-mon[50983]: pgmap v3398: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:27.690 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:27 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:08:27.690 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:27 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:08:27.690 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:27 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:08:27.690 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:27 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:08:27.690 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:27 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T07:08:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:08:28.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:08:28.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:08:28.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:08:28.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:27 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T07:08:29.170 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:28 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:08:29.170 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:28 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:08:29.170 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:28 vm00.local ceph-mon[49980]: pgmap v3399: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:28 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:08:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:28 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:08:29.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:28 vm03.local ceph-mon[50983]: pgmap v3399: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:29.918 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:29.918 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:29.946 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:29.947 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:31 vm00.local ceph-mon[49980]: pgmap v3400: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:31 vm03.local ceph-mon[50983]: pgmap v3400: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:34.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:33 vm00.local ceph-mon[49980]: pgmap v3401: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:33 vm03.local ceph-mon[50983]: pgmap v3401: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:34.948 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:34.949 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:34.977 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:34.978 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:35 vm00.local ceph-mon[49980]: pgmap v3402: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:35 vm03.local ceph-mon[50983]: pgmap v3402: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:08:36.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:08:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:08:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:08:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:37 vm00.local ceph-mon[49980]: pgmap v3403: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:37 vm03.local ceph-mon[50983]: pgmap v3403: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:39.979 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:39.980 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:40.007 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:40.007 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:39 vm00.local ceph-mon[49980]: pgmap v3404: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:39 vm03.local ceph-mon[50983]: pgmap v3404: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:41 vm00.local ceph-mon[49980]: pgmap v3405: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:41 vm03.local ceph-mon[50983]: pgmap v3405: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:44.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:43 vm00.local ceph-mon[49980]: pgmap v3406: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:43 vm03.local ceph-mon[50983]: pgmap v3406: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:45.008 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:45.009 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:45.036 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:45.036 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:45 vm00.local ceph-mon[49980]: pgmap v3407: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:45 vm03.local ceph-mon[50983]: pgmap v3407: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:47 vm00.local ceph-mon[49980]: pgmap v3408: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:47 vm03.local ceph-mon[50983]: pgmap v3408: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:49 vm00.local ceph-mon[49980]: pgmap v3409: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:50.037 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:50.038 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:49 vm03.local ceph-mon[50983]: pgmap v3409: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:50.064 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:50.065 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:51 vm00.local ceph-mon[49980]: pgmap v3410: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:51 vm03.local ceph-mon[50983]: pgmap v3410: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:54.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:53 vm00.local ceph-mon[49980]: pgmap v3411: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:53 vm03.local ceph-mon[50983]: pgmap v3411: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:08:55.066 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:08:55.067 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:08:55.100 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:08:55.100 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:08:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:55 vm00.local ceph-mon[49980]: pgmap v3412: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:55 vm03.local ceph-mon[50983]: pgmap v3412: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:57 vm00.local ceph-mon[49980]: pgmap v3413: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:08:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:57 vm03.local ceph-mon[50983]: pgmap v3413: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:08:59 vm00.local ceph-mon[49980]: pgmap v3414: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:08:59 vm03.local ceph-mon[50983]: pgmap v3414: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:00.102 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:00.102 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:00.130 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:00.130 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:01 vm00.local ceph-mon[49980]: pgmap v3415: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:01 vm03.local ceph-mon[50983]: pgmap v3415: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:04.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:03 vm00.local ceph-mon[49980]: pgmap v3416: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:03 vm03.local ceph-mon[50983]: pgmap v3416: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:05.131 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:05.132 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:05.158 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:05.159 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:05.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:05 vm00.local ceph-mon[49980]: pgmap v3417: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:05.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:05 vm03.local ceph-mon[50983]: pgmap v3417: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:07 vm00.local ceph-mon[49980]: pgmap v3418: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:07 vm03.local ceph-mon[50983]: pgmap v3418: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:09 vm00.local ceph-mon[49980]: pgmap v3419: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:09 vm03.local ceph-mon[50983]: pgmap v3419: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:10.160 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:10.161 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:10.187 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:10.187 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:11 vm00.local ceph-mon[49980]: pgmap v3420: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:11 vm03.local ceph-mon[50983]: pgmap v3420: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:13.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:12 vm00.local ceph-mon[49980]: pgmap v3421: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:13.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:12 vm03.local ceph-mon[50983]: pgmap v3421: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:15.189 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:15.189 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:15.274 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:15.274 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:15 vm00.local ceph-mon[49980]: pgmap v3422: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:15 vm03.local ceph-mon[50983]: pgmap v3422: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:17 vm00.local ceph-mon[49980]: pgmap v3423: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:17 vm03.local ceph-mon[50983]: pgmap v3423: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:19 vm00.local ceph-mon[49980]: pgmap v3424: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:19 vm03.local ceph-mon[50983]: pgmap v3424: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:20.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:20.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:20.352 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
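Every attempt ends in the generic 'mount.nfs: mount system call failed', which does not surface the underlying errno. A hedged triage sketch for the client side, using only stock utilities (these commands are not part of the job definition; they are what one might run by hand while the loop spins):

    # Hypothetical client-side checks; all commands are standard tools.
    dmesg | tail -n 20           # kernel NFS client error, e.g. connection refused or timeout
    ss -tln | grep 2049          # is any NFS server listening on the NFS port on this host?
    mount -v -t nfs vm00.local:/fake /mnt/foo -o sync   # verbose attempt to surface the errno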
2026-03-10T07:09:20.353 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:21 vm03.local ceph-mon[50983]: pgmap v3425: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:22.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:21 vm00.local ceph-mon[49980]: pgmap v3425: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:24.052 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:23 vm00.local ceph-mon[49980]: pgmap v3426: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:23 vm03.local ceph-mon[50983]: pgmap v3426: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:25.355 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:25.355 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:25.394 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:25.395 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:25 vm00.local ceph-mon[49980]: pgmap v3427: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:25 vm03.local ceph-mon[50983]: pgmap v3427: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:28.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:27 vm00.local ceph-mon[49980]: pgmap v3428: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:27 vm03.local ceph-mon[50983]: pgmap v3428: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:28.715 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:28 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:09:28.715 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:28 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:09:28.715 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:28 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:09:28.893 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:28 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:09:28.893 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:28 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:09:28.893 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:28 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:09:29.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:29 vm03.local ceph-mon[50983]: pgmap v3429: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:29.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T07:09:29.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.716 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:29 vm00.local ceph-mon[49980]: pgmap v3429: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T07:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:29.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:09:30.396 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:30.397 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:30.426 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:30.426 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:32.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:31 vm00.local ceph-mon[49980]: pgmap v3430: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:31 vm03.local ceph-mon[50983]: pgmap v3430: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:32 vm00.local ceph-mon[49980]: pgmap v3431: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:32 vm03.local ceph-mon[50983]: pgmap v3431: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:35.428 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:35.429 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:35.456 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:35.456 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:35.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:35 vm00.local ceph-mon[49980]: pgmap v3432: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:35.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:35 vm03.local ceph-mon[50983]: pgmap v3432: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:09:36.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:09:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:09:36.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:09:37.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:37 vm00.local ceph-mon[49980]: pgmap v3433: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:37.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:37 vm03.local ceph-mon[50983]: pgmap v3433: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:39.979 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:39 vm03.local ceph-mon[50983]: pgmap v3434: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:39 vm00.local ceph-mon[49980]: pgmap v3434: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:40.458 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:40.458 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:40.489 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:40.490 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:41 vm00.local ceph-mon[49980]: pgmap v3435: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:41 vm03.local ceph-mon[50983]: pgmap v3435: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:43.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:43 vm00.local ceph-mon[49980]: pgmap v3436: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:43 vm03.local ceph-mon[50983]: pgmap v3436: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:45.492 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:45.492 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:45.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:45 vm00.local ceph-mon[49980]: pgmap v3437: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:45.531 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:45.531 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:45.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:45 vm03.local ceph-mon[50983]: pgmap v3437: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:47 vm00.local ceph-mon[49980]: pgmap v3438: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:47 vm03.local ceph-mon[50983]: pgmap v3438: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:49 vm00.local ceph-mon[49980]: pgmap v3439: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:49 vm03.local ceph-mon[50983]: pgmap v3439: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:50.533 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:50.534 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:50.560 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:50.560 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:51 vm00.local ceph-mon[49980]: pgmap v3440: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:51 vm03.local ceph-mon[50983]: pgmap v3440: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:54.031 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:53 vm00.local ceph-mon[49980]: pgmap v3441: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:53 vm03.local ceph-mon[50983]: pgmap v3441: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:09:55.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:55 vm00.local ceph-mon[49980]: pgmap v3442: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:55.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:55 vm03.local ceph-mon[50983]: pgmap v3442: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:55.561 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:09:55.562 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:09:55.590 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:09:55.590 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:09:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:57 vm00.local ceph-mon[49980]: pgmap v3443: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:09:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:57 vm03.local ceph-mon[50983]: pgmap v3443: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:09:59 vm00.local ceph-mon[49980]: pgmap v3444: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:09:59 vm03.local ceph-mon[50983]: pgmap v3444: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:00.591 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:00.592 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:00.618 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:00.618 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:01.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:00 vm00.local ceph-mon[49980]: overall HEALTH_OK
2026-03-10T07:10:01.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:00 vm03.local ceph-mon[50983]: overall HEALTH_OK
2026-03-10T07:10:02.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:01 vm00.local ceph-mon[49980]: pgmap v3445: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:02.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:01 vm03.local ceph-mon[50983]: pgmap v3445: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:03.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:02 vm00.local ceph-mon[49980]: pgmap v3446: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:03.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:02 vm03.local ceph-mon[50983]: pgmap v3446: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:05.620 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:05.620 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:05.690 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:05.691 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:05 vm00.local ceph-mon[49980]: pgmap v3447: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:05 vm03.local ceph-mon[50983]: pgmap v3447: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:07 vm00.local ceph-mon[49980]: pgmap v3448: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:07 vm03.local ceph-mon[50983]: pgmap v3448: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:09 vm00.local ceph-mon[49980]: pgmap v3449: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:09 vm03.local ceph-mon[50983]: pgmap v3449: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:10.692 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:10.693 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:10.744 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:10.744 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:11 vm00.local ceph-mon[49980]: pgmap v3450: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:11 vm03.local ceph-mon[50983]: pgmap v3450: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:13.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:13 vm00.local ceph-mon[49980]: pgmap v3451: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:13 vm03.local ceph-mon[50983]: pgmap v3451: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:15.530 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:15 vm00.local ceph-mon[49980]: pgmap v3452: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:15.556 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:15 vm03.local ceph-mon[50983]: pgmap v3452: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:15.745 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:15.746 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:15.772 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:15.773 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:17 vm03.local ceph-mon[50983]: pgmap v3453: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:18.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:17 vm00.local ceph-mon[49980]: pgmap v3453: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:19 vm03.local ceph-mon[50983]: pgmap v3454: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:20.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:19 vm00.local ceph-mon[49980]: pgmap v3454: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:20.774 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:20.775 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:20.800 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:20.801 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:21 vm03.local ceph-mon[50983]: pgmap v3455: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:22.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:21 vm00.local ceph-mon[49980]: pgmap v3455: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:23.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:22 vm00.local ceph-mon[49980]: pgmap v3456: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:23.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:22 vm03.local ceph-mon[50983]: pgmap v3456: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:25 vm00.local ceph-mon[49980]: pgmap v3457: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:25.802 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:25.802 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:25 vm03.local ceph-mon[50983]: pgmap v3457: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:25.830 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:25.830 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:27 vm00.local ceph-mon[49980]: pgmap v3458: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:27 vm03.local ceph-mon[50983]: pgmap v3458: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:29.722 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:29 vm03.local ceph-mon[50983]: pgmap v3459: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:29.722 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:10:29.722 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:10:29.722 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:29 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:10:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:29 vm00.local ceph-mon[49980]: pgmap v3459: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:10:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:10:30.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:29 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:10:30.832 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:30.833 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:30.858 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:30.859 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:10:31.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:10:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:10:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:10:32.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:31 vm03.local ceph-mon[50983]: pgmap v3460: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:32.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:31 vm00.local ceph-mon[49980]: pgmap v3460: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:33.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:32 vm03.local ceph-mon[50983]: pgmap v3461: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:33.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:32 vm00.local ceph-mon[49980]: pgmap v3461: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:35 vm00.local ceph-mon[49980]: pgmap v3462: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:35 vm03.local ceph-mon[50983]: pgmap v3462: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:35.860 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:35.861 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:35.890 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:35.890 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:10:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:10:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:10:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:10:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:37 vm00.local ceph-mon[49980]: pgmap v3463: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:37 vm03.local ceph-mon[50983]: pgmap v3463: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
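The mon entries that recur on a roughly one-minute cycle throughout this stretch ('config dump', 'config generate-minimal-conf', 'auth get client.admin', the osd_memory_target removals, and the rbd_support schedule-key removals) appear to be periodic mgr refresh traffic, independent of the failing mount loop. The dispatched mon commands correspond to stock ceph CLI invocations, e.g.:

    # Manual equivalents of the mon commands the mgr dispatches above
    # (shown for orientation only; the mgr issues these internally).
    ceph config dump --format json
    ceph config generate-minimal-conf
    ceph auth get client.admin
    ceph config rm mgr mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule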
2026-03-10T07:10:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:39 vm00.local ceph-mon[49980]: pgmap v3464: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:39 vm03.local ceph-mon[50983]: pgmap v3464: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:40.891 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:40.892 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:40.919 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:40.920 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:41 vm00.local ceph-mon[49980]: pgmap v3465: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:41 vm03.local ceph-mon[50983]: pgmap v3465: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:44.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:43 vm00.local ceph-mon[49980]: pgmap v3466: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:43 vm03.local ceph-mon[50983]: pgmap v3466: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:45 vm00.local ceph-mon[49980]: pgmap v3467: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:45 vm03.local ceph-mon[50983]: pgmap v3467: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:45.921 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:45.922 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:45.949 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:45.950 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:47 vm00.local ceph-mon[49980]: pgmap v3468: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:47 vm03.local ceph-mon[50983]: pgmap v3468: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:49 vm00.local ceph-mon[49980]: pgmap v3469: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:50.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:49 vm03.local ceph-mon[50983]: pgmap v3469: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:50.951 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:50.952 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:50.978 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:50.979 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:51 vm00.local ceph-mon[49980]: pgmap v3470: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:51 vm03.local ceph-mon[50983]: pgmap v3470: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:54.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:53 vm00.local ceph-mon[49980]: pgmap v3471: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:54.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:53 vm03.local ceph-mon[50983]: pgmap v3471: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:10:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:55 vm00.local ceph-mon[49980]: pgmap v3472: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:55 vm03.local ceph-mon[50983]: pgmap v3472: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:55.980 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:10:55.981 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:10:56.009 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:10:56.010 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:10:58.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:57 vm00.local ceph-mon[49980]: pgmap v3473: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:10:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:57 vm03.local ceph-mon[50983]: pgmap v3473: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:00.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:10:59 vm00.local ceph-mon[49980]: pgmap v3474: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:00.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:10:59 vm03.local ceph-mon[50983]: pgmap v3474: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:01.011 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:01.012 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:01.038 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:01.039 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:01 vm00.local ceph-mon[49980]: pgmap v3475: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:01 vm03.local ceph-mon[50983]: pgmap v3475: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:04.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:03 vm00.local ceph-mon[49980]: pgmap v3476: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:04.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:03 vm03.local ceph-mon[50983]: pgmap v3476: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:05 vm00.local ceph-mon[49980]: pgmap v3477: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:05 vm03.local ceph-mon[50983]: pgmap v3477: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:06.041 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:06.041 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:06.072 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:06.072 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:07 vm00.local ceph-mon[49980]: pgmap v3478: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:07 vm03.local ceph-mon[50983]: pgmap v3478: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:09 vm00.local ceph-mon[49980]: pgmap v3479: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:10.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:09 vm03.local ceph-mon[50983]: pgmap v3479: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:11.074 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:11.074 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:11.102 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:11.103 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:11 vm00.local ceph-mon[49980]: pgmap v3480: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:11 vm03.local ceph-mon[50983]: pgmap v3480: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:14.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:13 vm00.local ceph-mon[49980]: pgmap v3481: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:13 vm03.local ceph-mon[50983]: pgmap v3481: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:15 vm00.local ceph-mon[49980]: pgmap v3482: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:15 vm03.local ceph-mon[50983]: pgmap v3482: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:16.104 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:16.104 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:16.132 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:16.133 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:17 vm00.local ceph-mon[49980]: pgmap v3483: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:17 vm03.local ceph-mon[50983]: pgmap v3483: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:19 vm00.local ceph-mon[49980]: pgmap v3484: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:19 vm03.local ceph-mon[50983]: pgmap v3484: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:21.134 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:21.135 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:21.190 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:21.190 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:21 vm00.local ceph-mon[49980]: pgmap v3485: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:21 vm03.local ceph-mon[50983]: pgmap v3485: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:24.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:23 vm00.local ceph-mon[49980]: pgmap v3486: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:23 vm03.local ceph-mon[50983]: pgmap v3486: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:25.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:25 vm00.local ceph-mon[49980]: pgmap v3487: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:25.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:25 vm03.local ceph-mon[50983]: pgmap v3487: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:26.191 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:26.192 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:26.219 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:26.220 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:27 vm00.local ceph-mon[49980]: pgmap v3488: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:27 vm03.local ceph-mon[50983]: pgmap v3488: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:30.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:29 vm00.local ceph-mon[49980]: pgmap v3489: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:30.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:29 vm03.local ceph-mon[50983]: pgmap v3489: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:31.221 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:31.222 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:31.263 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:31.263 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:11:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:11:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:11:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:11:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:11:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:11:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:11:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:11:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:11:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:11:32.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:31 vm00.local ceph-mon[49980]: pgmap v3490: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:32.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:31 vm03.local ceph-mon[50983]: pgmap v3490: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:33.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:32 vm00.local ceph-mon[49980]: pgmap v3491: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:33.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:32 vm03.local ceph-mon[50983]: pgmap v3491: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:35 vm00.local ceph-mon[49980]: pgmap v3492: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:35 vm03.local ceph-mon[50983]: pgmap v3492: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:36.265 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:36.265 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:36.292 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:36.293 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:11:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:11:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:11:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:11:37.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:37 vm00.local ceph-mon[49980]: pgmap v3493: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:37.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:37 vm03.local ceph-mon[50983]: pgmap v3493: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:40.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:39 vm00.local ceph-mon[49980]: pgmap v3494: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:39 vm03.local ceph-mon[50983]: pgmap v3494: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:41.294 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:41.295 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:41.322 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:41.323 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:42.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:41 vm00.local ceph-mon[49980]: pgmap v3495: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:42.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:41 vm03.local ceph-mon[50983]: pgmap v3495: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:44.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:43 vm00.local ceph-mon[49980]: pgmap v3496: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:44.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:43 vm03.local ceph-mon[50983]: pgmap v3496: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:45.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:45 vm00.local ceph-mon[49980]: pgmap v3497: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:45.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:45 vm03.local ceph-mon[50983]: pgmap v3497: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:46.324 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:46.325 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:46.351 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:46.352 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:48.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:47 vm00.local ceph-mon[49980]: pgmap v3498: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:48.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:47 vm03.local ceph-mon[50983]: pgmap v3498: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:49.986 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:49 vm03.local ceph-mon[50983]: pgmap v3499: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:50.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:49 vm00.local ceph-mon[49980]: pgmap v3499: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:51.353 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:51.354 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:51.382 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:51.383 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:52.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:51 vm00.local ceph-mon[49980]: pgmap v3500: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:52.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:51 vm03.local ceph-mon[50983]: pgmap v3500: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:54.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:53 vm00.local ceph-mon[49980]: pgmap v3501: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:54.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:53 vm03.local ceph-mon[50983]: pgmap v3501: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:55.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:55 vm00.local ceph-mon[49980]: pgmap v3502: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:55.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:55 vm03.local ceph-mon[50983]: pgmap v3502: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:56.384 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:11:56.385 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:11:56.411 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:11:56.412 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:11:58.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:57 vm00.local ceph-mon[49980]: pgmap v3503: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:58.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:57 vm03.local ceph-mon[50983]: pgmap v3503: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:11:59.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:11:58 vm00.local ceph-mon[49980]: pgmap v3504: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:11:59.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:11:58 vm03.local ceph-mon[50983]: pgmap v3504: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:01.413 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:01.413 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:01.440 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:01.441 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:02.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:01 vm00.local ceph-mon[49980]: pgmap v3505: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:02.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:01 vm03.local ceph-mon[50983]: pgmap v3505: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:04.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:03 vm00.local ceph-mon[49980]: pgmap v3506: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:04.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:03 vm03.local ceph-mon[50983]: pgmap v3506: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:05.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:05 vm00.local ceph-mon[49980]: pgmap v3507: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:05.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:05 vm03.local ceph-mon[50983]: pgmap v3507: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:06.442 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:06.443 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:06.470 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:06.470 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:08.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:07 vm00.local ceph-mon[49980]: pgmap v3508: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:08.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:07 vm03.local ceph-mon[50983]: pgmap v3508: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:10.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:09 vm00.local ceph-mon[49980]: pgmap v3509: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:10.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:09 vm03.local ceph-mon[50983]: pgmap v3509: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:11.472 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:11.472 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:11.500 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:11.500 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:12.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:11 vm00.local ceph-mon[49980]: pgmap v3510: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:12.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:11 vm03.local ceph-mon[50983]: pgmap v3510: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:14.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:13 vm00.local ceph-mon[49980]: pgmap v3511: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:14.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:13 vm03.local ceph-mon[50983]: pgmap v3511: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:15.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:15 vm00.local ceph-mon[49980]: pgmap v3512: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:15.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:15 vm03.local ceph-mon[50983]: pgmap v3512: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:16.501 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:16.502 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:16.528 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:16.529 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:18.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:17 vm00.local ceph-mon[49980]: pgmap v3513: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:18.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:17 vm03.local ceph-mon[50983]: pgmap v3513: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:20.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:19 vm00.local ceph-mon[49980]: pgmap v3514: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:20.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:19 vm03.local ceph-mon[50983]: pgmap v3514: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:21.530 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:21.530 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:21.559 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:21.559 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:22.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:21 vm00.local ceph-mon[49980]: pgmap v3515: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:22.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:21 vm03.local ceph-mon[50983]: pgmap v3515: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:24.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:23 vm00.local ceph-mon[49980]: pgmap v3516: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:24.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:23 vm03.local ceph-mon[50983]: pgmap v3516: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:26.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:25 vm00.local ceph-mon[49980]: pgmap v3517: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:26.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:25 vm03.local ceph-mon[50983]: pgmap v3517: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:26.561 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:26.561 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:26.587 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:26.587 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:28.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:27 vm00.local ceph-mon[49980]: pgmap v3518: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:28.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:27 vm03.local ceph-mon[50983]: pgmap v3518: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:30.281 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:29 vm00.local ceph-mon[49980]: pgmap v3519: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:30.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:29 vm03.local ceph-mon[50983]: pgmap v3519: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:30 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:12:31.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:30 vm00.local ceph-mon[49980]: pgmap v3520: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T07:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T07:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T07:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:30 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw'
2026-03-10T07:12:31.306 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:30 vm03.local ceph-mon[50983]: pgmap v3520: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:31.589 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:31.589 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:31.616 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:31.616 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:34.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:33 vm00.local ceph-mon[49980]: pgmap v3521: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:34.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:33 vm03.local ceph-mon[50983]: pgmap v3521: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:35 vm00.local ceph-mon[49980]: pgmap v3522: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:35.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:35 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:12:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:35 vm03.local ceph-mon[50983]: pgmap v3522: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:35.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:35 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/mirror_snapshot_schedule"}]: dispatch
2026-03-10T07:12:36.617 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname
2026-03-10T07:12:36.618 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync
2026-03-10T07:12:36.767 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed
2026-03-10T07:12:36.767 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5
2026-03-10T07:12:36.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:36 vm00.local ceph-mon[49980]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:12:36.806 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:36 vm03.local ceph-mon[50983]: from='mgr.14214 192.168.123.100:0/4247496709' entity='mgr.vm00.vnepyw' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vnepyw/trash_purge_schedule"}]: dispatch
2026-03-10T07:12:38.030 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:37 vm00.local ceph-mon[49980]: pgmap v3523: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:38.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:37 vm03.local ceph-mon[50983]: pgmap v3523: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T07:12:40.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 07:12:39 vm03.local ceph-mon[50983]: pgmap v3524: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:40.280 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 10 07:12:39 vm00.local ceph-mon[49980]: pgmap v3524: 97 pgs: 97 active+clean; 453 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T07:12:41.135 DEBUG:teuthology.exit:Got signal 15; running 1 handler...
2026-03-10T07:12:41.135 DEBUG:teuthology.exit:Finished running handlers